/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX enabled platforms.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  _xmmindex = xnoreg;
  _isxmmindex = false;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
  _xmmindex = xnoreg;
  _isxmmindex = false;
}

#endif // _LP64


// Convert the raw encoding form into the form expected by the constructor for
// Address.  An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
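// Illustration (hypothetical call, not from the original code): with base rax,
// index encoding 4 (rsp) and disp 16,
//   Address::make_raw(rax->encoding(), rsp->encoding(), 0, 16, relocInfo::none)
// yields the operand [rax + 16], since the rsp index slot means "no index".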
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, (op2 | encode(dst)), imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int24(op1 | 0x02,        // set sign bit
               op2 | encode(dst),
               imm32 & 0xFF);
  } else {
    emit_int16(op1, (op2 | encode(dst)));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, (op2 | encode(dst)));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
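    // The trailing "4" below is the rip_relative_correction: a 4-byte
    // immediate still follows the memory operand, so a RIP-relative disp32
    // must be biased past it (see emit_operand_helper).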
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int16(op1, (op2 | encode(dst) << 3 | encode(src)));
}


bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}


bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}

static bool is_valid_encoding(int reg_enc) {
  return reg_enc >= 0;
}

static int raw_encode(Register reg) {
  assert(reg == noreg || reg->is_valid(), "sanity");
  int reg_enc = (intptr_t)reg;
  assert(reg_enc == -1 || is_valid_encoding(reg_enc), "sanity");
  return reg_enc;
}

static int raw_encode(XMMRegister xmmreg) {
  assert(xmmreg == xnoreg || xmmreg->is_valid(), "sanity");
  int xmmreg_enc = (intptr_t)xmmreg;
  assert(xmmreg_enc == -1 || is_valid_encoding(xmmreg_enc), "sanity");
  return xmmreg_enc;
}

static int modrm_encoding(int mod, int dst_enc, int src_enc) {
  return (mod & 3) << 6 | (dst_enc & 7) << 3 | (src_enc & 7);
}

static int sib_encoding(Address::ScaleFactor scale, int index_enc, int base_enc) {
  return (scale & 3) << 6 | (index_enc & 7) << 3 | (base_enc & 7);
}

inline void Assembler::emit_modrm(int mod, int dst_enc, int src_enc) {
  assert((mod & 3) != 0b11, "forbidden");
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int8(modrm);
}

inline void Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc,
                                        int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  emit_int16(modrm, disp & 0xFF);
}

inline void Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc,
                                      Address::ScaleFactor scale, int index_enc, int base_enc) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int16(modrm, sib);
}

inline void Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc,
                                            Address::ScaleFactor scale, int index_enc, int base_enc,
                                            int disp) {
  int modrm = modrm_encoding(mod, dst_enc, src_enc);
  int sib = sib_encoding(scale, index_enc, base_enc);
  emit_int24(modrm, sib, disp & 0xFF);
}

void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
                                    Address::ScaleFactor scale, int disp,
                                    RelocationHolder const& rspec,
                                    int rip_relative_correction) {
  bool no_relocation = (rspec.type() == relocInfo::none);

  if (is_valid_encoding(base_enc)) {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && no_relocation &&
          base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       scale, index_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             scale, index_enc, base_enc,
                             disp);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       scale, index_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base_enc == rsp->encoding() LP64_ONLY(|| base_enc == r12->encoding())) {
      // [rsp + disp]
      if (disp == 0 && no_relocation) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_modrm_sib(0b00, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_modrm_sib_disp8(0b01, reg_enc, 0b100,
                             Address::times_1, 0b100, 0b100,
                             disp);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_modrm_sib(0b10, reg_enc, 0b100,
                       Address::times_1, 0b100, 0b100);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base_enc != rsp->encoding() LP64_ONLY(&& base_enc != r12->encoding()), "illegal addressing mode");
      if (disp == 0 && no_relocation &&
          base_enc != rbp->encoding() LP64_ONLY(&& base_enc != r13->encoding())) {
        // [base]
        // [00 reg base]
        emit_modrm(0, reg_enc, base_enc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_modrm_disp8(0b01, reg_enc, base_enc,
                         disp);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_modrm(0b10, reg_enc, base_enc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (is_valid_encoding(index_enc)) {
      assert(scale != Address::no_scale, "inconsistent address");
      // base == noreg
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100,
                     scale, index_enc, 0b101 /* no base */);
      emit_data(disp, rspec, disp32_operand);
    } else if (!no_relocation) {
      // base == noreg, index == noreg
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 reg 101] disp32

      emit_modrm(0b00, reg_enc, 0b101 /* no base */);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction.  That needs more correction here.
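      // Worked illustration (byte counts assumed for concreteness): for
      //   cmp dword ptr [rip+disp32], imm32
      // the CPU resolves the target relative to the end of the whole
      // instruction, which lies sizeof(int32_t) disp bytes plus
      // rip_relative_correction immediate bytes past this point; the
      // subtraction below rebases disp onto that next_ip.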
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // base == noreg, index == noreg, no_relocation == true
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */,
                     Address::times_1, 0b100, 0b101);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}

void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  emit_operand_helper(raw_encode(reg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec, rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  assert(!index->is_valid() || index != rsp, "illegal addressing mode");
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(index),
                      scale, disp, rspec);
}

void Assembler::emit_operand(XMMRegister xmmreg, Register base, XMMRegister xmmindex,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  assert(xmmreg->encoding() < 16 || UseAVX > 2, "not supported");
  assert(xmmindex->encoding() < 16 || UseAVX > 2, "not supported");
  emit_operand_helper(raw_encode(xmmreg), raw_encode(base), raw_encode(xmmindex),
                      scale, disp, rspec, /* rip_relative_correction */ 0);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
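  // For example, REP4(0x00) expands to
  //   case 0x00: case 0x01: case 0x02: case 0x03
  // and REP16 covers a contiguous run of sixteen opcodes.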
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
           case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
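      // fall through: psrldq (0x73) below supplies the shared imm8 tail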
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions,
    // but those carry the prefix 0x0F and are handled when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them, bits [7:6] are set in the VEX second byte, since a
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits, the REX and vvvv bits are stored inverted.
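    //
    // For reference, the VEX prefix layout (fields stored inverted are
    // marked with ~):
    //   C5 [~R ~vvvv L pp]                       - 2-byte form
    //   C4 [~R ~X ~B m-mmmm] [W ~vvvv L pp]      - 3-byte form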
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1;  // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                  // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  if (adr.isxmmindex()) {
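    // VSIB form: the index is an XMM register, as used by gather/scatter
    // style addressing.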
    emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec);
  } else {
    emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
                 adr._rspec);
  }
}

// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int32(0x0F,
             0x1F,
             0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
             0);   // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int32(0x0F,
             0x1F,
             0x44,  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int24(0x0F,
             0x1F,
             (unsigned char)0x80);
                    // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int32(0x0F,
             0x1F,
             (unsigned char)0x84,
                    // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}

void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (0xC0 | encode));
}


void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
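  // Encoding emitted here: 66 0F 38 DF /r (AESDECLAST xmm, xmm/m128).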
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, 0xC0 | encode);
}

void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, (0xC0 | encode));
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}

void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
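  // EVEX-encoded VAES form: unlike the 128-bit aesenclast above, this variant
  // can process 256- or 512-bit vectors when VAES is available.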
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (0xC0 | encode));
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xBC,
             0xC0 | encode);
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             (unsigned char)0xBD,
             0xC0 | encode);
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int16(0x0F, (0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
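  // rbx is not a real operand here: BLSI is encoded as VEX 0F38 /3, so rbx's
  // encoding (3) supplies the ModRM reg-field opcode extension.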
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3,
             0xC0 | encode);
}

void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  // Entry is NULL in case of a scratch emit.
  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F,
             0x40 | cc,
             0xC0 | encode);
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int16(0x0F, (0x40 | cc));
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int16(0x66, (unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if the values are equal; otherwise, the value at
// adr is loaded into rax.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int16(0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr);
}

// The 8-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if the values are equal; otherwise, the value at
// adr is loaded into rax.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg, true);
  emit_int16(0x0F, (unsigned char)0xB0);
  emit_operand(reg, adr);
}

void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there.  Strangely, ucomisd comes out correct.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (0xC0 | encode));
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (0xC0 | encode));
}

void Assembler::cpuid() {
  emit_int16(0x0F, (unsigned char)0xA2);
}

// Opcode / Instruction                      Op/En  64-Bit Mode  Compat/Leg Mode  Description                  Implemented
// F2 0F 38 F0 / r       CRC32 r32, r/m8     RM     Valid        Valid            Accumulate CRC32 on r/m8.    v
// F2 REX 0F 38 F0 / r   CRC32 r32, r/m8*    RM     Valid        N.E.             Accumulate CRC32 on r/m8.    -
// F2 REX.W 0F 38 F0 / r CRC32 r64, r/m8     RM     Valid        N.E.             Accumulate CRC32 on r/m8.    -
//
// F2 0F 38 F1 / r       CRC32 r32, r/m16    RM     Valid        Valid            Accumulate CRC32 on r/m16.   v
//
// F2 0F 38 F1 / r       CRC32 r32, r/m32    RM     Valid        Valid            Accumulate CRC32 on r/m32.   v
//
// F2 REX.W 0F 38 F1 / r CRC32 r64, r/m64    RM     Valid        N.E.             Accumulate CRC32 on r/m64.   v
void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((unsigned char)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    // Note:
    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
    //
    // Page B - 72 Vol. 2C says
    //   qwreg2 to qwreg  1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
    //   mem64 to qwreg   1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r/m
    //                                                                    F0!!!
    // while page 3-208 Vol. 2A says
    //   F2 REX.W 0F 38 F1 / r  CRC32 r64, r/m64  RM  Valid  N.E.  Accumulate CRC32 on r/m64.
    //
    // the 0 in the last bit is reserved for a different flavor of this instruction:
    //   F2 REX.W 0F 38 F0 / r  CRC32 r64, r/m8   RM  Valid  N.E.  Accumulate CRC32 on r/m8.
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, v, p);)
  emit_int32(0x0F,
             0x38,
             0xF0 | w,
             0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
}

void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32-bit mode
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, adr, p);)
  emit_int24(0x0F, 0x38, (0xF0 | w));
  emit_operand(crc, adr);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (0xC0 | encode));
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5B, (0xC0 | encode));
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 |
encode)); 1817 } 1818 1819 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { 1820 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1821 InstructionMark im(this); 1822 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1823 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1824 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1825 emit_int8(0x2A); 1826 emit_operand(dst, src); 1827 } 1828 1829 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { 1830 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1831 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1832 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1833 emit_int16(0x2A, (0xC0 | encode)); 1834 } 1835 1836 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { 1837 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1838 InstructionMark im(this); 1839 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1840 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1841 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1842 emit_int8(0x2A); 1843 emit_operand(dst, src); 1844 } 1845 1846 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 1847 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1848 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1849 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1850 emit_int16(0x2A, (0xC0 | encode)); 1851 } 1852 1853 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { 1854 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1855 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1856 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1857 emit_int16(0x5A, (0xC0 | encode)); 1858 } 1859 1860 void Assembler::cvtss2sd(XMMRegister dst, Address src) { 1861 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1862 InstructionMark im(this); 1863 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1864 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1865 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1866 emit_int8(0x5A); 1867 emit_operand(dst, src); 1868 } 1869 1870 1871 void Assembler::cvttsd2sil(Register dst, XMMRegister src) { 1872 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1873 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1874 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1875 emit_int16(0x2C, (0xC0 | encode)); 1876 } 1877 1878 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 1879 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1880 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ 
false); 1881 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1882 emit_int16(0x2C, (0xC0 | encode)); 1883 } 1884 1885 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) { 1886 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1887 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 1888 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1889 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1890 emit_int16((unsigned char)0xE6, (0xC0 | encode)); 1891 } 1892 1893 void Assembler::pabsb(XMMRegister dst, XMMRegister src) { 1894 assert(VM_Version::supports_ssse3(), ""); 1895 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 1896 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1897 emit_int16(0x1C, (0xC0 | encode)); 1898 } 1899 1900 void Assembler::pabsw(XMMRegister dst, XMMRegister src) { 1901 assert(VM_Version::supports_ssse3(), ""); 1902 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 1903 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1904 emit_int16(0x1D, (0xC0 | encode)); 1905 } 1906 1907 void Assembler::pabsd(XMMRegister dst, XMMRegister src) { 1908 assert(VM_Version::supports_ssse3(), ""); 1909 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1910 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1911 emit_int16(0x1E, (0xC0 | encode)); 1912 } 1913 1914 void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) { 1915 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 1916 vector_len == AVX_256bit? VM_Version::supports_avx2() : 1917 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 1918 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 1919 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1920 emit_int16(0x1C, (0xC0 | encode)); 1921 } 1922 1923 void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) { 1924 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 1925 vector_len == AVX_256bit? VM_Version::supports_avx2() : 1926 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 1927 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 1928 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1929 emit_int16(0x1D, (0xC0 | encode)); 1930 } 1931 1932 void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) { 1933 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 1934 vector_len == AVX_256bit? VM_Version::supports_avx2() : 1935 vector_len == AVX_512bit? 
VM_Version::supports_evex() : 0, ""); 1936 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1937 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1938 emit_int16(0x1E, (0xC0 | encode)); 1939 } 1940 1941 void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) { 1942 assert(UseAVX > 2, ""); 1943 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1944 attributes.set_is_evex_instruction(); 1945 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1946 emit_int16(0x1F, (0xC0 | encode)); 1947 } 1948 1949 void Assembler::decl(Address dst) { 1950 // Don't use it directly. Use MacroAssembler::decrement() instead. 1951 InstructionMark im(this); 1952 prefix(dst); 1953 emit_int8((unsigned char)0xFF); 1954 emit_operand(rcx, dst); 1955 } 1956 1957 void Assembler::divsd(XMMRegister dst, Address src) { 1958 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1959 InstructionMark im(this); 1960 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1961 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1962 attributes.set_rex_vex_w_reverted(); 1963 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1964 emit_int8(0x5E); 1965 emit_operand(dst, src); 1966 } 1967 1968 void Assembler::divsd(XMMRegister dst, XMMRegister src) { 1969 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1970 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1971 attributes.set_rex_vex_w_reverted(); 1972 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1973 emit_int16(0x5E, (0xC0 | encode)); 1974 } 1975 1976 void Assembler::divss(XMMRegister dst, Address src) { 1977 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1978 InstructionMark im(this); 1979 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1980 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1981 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1982 emit_int8(0x5E); 1983 emit_operand(dst, src); 1984 } 1985 1986 void Assembler::divss(XMMRegister dst, XMMRegister src) { 1987 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1988 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1989 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1990 emit_int16(0x5E, (0xC0 | encode)); 1991 } 1992 1993 void Assembler::hlt() { 1994 emit_int8((unsigned char)0xF4); 1995 } 1996 1997 void Assembler::idivl(Register src) { 1998 int encode = prefix_and_encode(src->encoding()); 1999 emit_int16((unsigned char)0xF7, (0xF8 | encode)); 2000 } 2001 2002 void Assembler::divl(Register src) { // Unsigned 2003 int encode = prefix_and_encode(src->encoding()); 2004 emit_int16((unsigned char)0xF7, (0xF0 | encode)); 2005 } 2006 2007 void Assembler::imull(Register src) { 2008 int encode = prefix_and_encode(src->encoding()); 2009 
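  // One-operand IMUL is encoded as F7 /5: the reg field of the ModRM byte holds
  // the opcode extension 101b, so the 0xE8 | encode below forms ModRM = 11 101 rrr.
  // Worked example (illustrative): for src = rbx (encoding 011b) this emits F7 EB.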
emit_int16((unsigned char)0xF7, (0xE8 | encode)); 2010 } 2011 2012 void Assembler::imull(Register dst, Register src) { 2013 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2014 emit_int24(0x0F, 2015 (unsigned char)0xAF, 2016 (0xC0 | encode)); 2017 } 2018 2019 2020 void Assembler::imull(Register dst, Register src, int value) { 2021 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2022 if (is8bit(value)) { 2023 emit_int24(0x6B, (0xC0 | encode), value & 0xFF); 2024 } else { 2025 emit_int16(0x69, (0xC0 | encode)); 2026 emit_int32(value); 2027 } 2028 } 2029 2030 void Assembler::imull(Register dst, Address src) { 2031 InstructionMark im(this); 2032 prefix(src, dst); 2033 emit_int16(0x0F, (unsigned char)0xAF); 2034 emit_operand(dst, src); 2035 } 2036 2037 2038 void Assembler::incl(Address dst) { 2039 // Don't use it directly. Use MacroAssembler::increment() instead. 2040 InstructionMark im(this); 2041 prefix(dst); 2042 emit_int8((unsigned char)0xFF); 2043 emit_operand(rax, dst); 2044 } 2045 2046 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) { 2047 InstructionMark im(this); 2048 assert((0 <= cc) && (cc < 16), "illegal cc"); 2049 if (L.is_bound()) { 2050 address dst = target(L); 2051 assert(dst != NULL, "jcc most probably wrong"); 2052 2053 const int short_size = 2; 2054 const int long_size = 6; 2055 intptr_t offs = (intptr_t)dst - (intptr_t)pc(); 2056 if (maybe_short && is8bit(offs - short_size)) { 2057 // 0111 tttn #8-bit disp 2058 emit_int16(0x70 | cc, (offs - short_size) & 0xFF); 2059 } else { 2060 // 0000 1111 1000 tttn #32-bit disp 2061 assert(is_simm32(offs - long_size), 2062 "must be 32bit offset (call4)"); 2063 emit_int16(0x0F, (0x80 | cc)); 2064 emit_int32(offs - long_size); 2065 } 2066 } else { 2067 // Note: could eliminate cond. jumps to this jump if condition 2068 // is the same however, seems to be rather unlikely case. 2069 // Note: use jccb() if label to be bound is very close to get 2070 // an 8-bit displacement 2071 L.add_patch_at(code(), locator()); 2072 emit_int16(0x0F, (0x80 | cc)); 2073 emit_int32(0); 2074 } 2075 } 2076 2077 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) { 2078 if (L.is_bound()) { 2079 const int short_size = 2; 2080 address entry = target(L); 2081 #ifdef ASSERT 2082 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2083 intptr_t delta = short_branch_delta(); 2084 if (delta != 0) { 2085 dist += (dist < 0 ? 
              (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16(0x70 | cc, 0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int16((unsigned char)0xEB, ((offs - short_size) & 0xFF));
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int16((unsigned char)0xFF, (0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
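// Worked displacement example for the short-jump math above and in jmpb_0
// below (the numbers are illustrative): if a bound label sits 10 bytes past
// the current pc(), then offs = 10 and the emitted rel8 byte is
// offs - short_size = 8, because the CPU applies the displacement relative
// to the address after the 2-byte EB xx instruction.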
void Assembler::jmpb_0(Label& L, const char* file, int line) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = entry - pc();
    emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16((unsigned char)0xEB, 0);
  }
}

void Assembler::ldmxcsr(Address src) {
  if (UseAVX > 0) {
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src);
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(src);
    emit_int16(0x0F, (unsigned char)0xAE);
    emit_operand(as_Register(2), src);
  }
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::lfence() {
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0);
}

// Emit sfence instruction
void Assembler::sfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x28, (0xC0 | encode));
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int vector_len = VM_Version::supports_avx512novl() ?
AVX_512bit : AVX_128bit; 2237 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2238 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2239 emit_int16(0x28, (0xC0 | encode)); 2240 } 2241 2242 void Assembler::movlhps(XMMRegister dst, XMMRegister src) { 2243 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2244 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2245 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2246 emit_int16(0x16, (0xC0 | encode)); 2247 } 2248 2249 void Assembler::movb(Register dst, Address src) { 2250 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 2251 InstructionMark im(this); 2252 prefix(src, dst, true); 2253 emit_int8((unsigned char)0x8A); 2254 emit_operand(dst, src); 2255 } 2256 2257 void Assembler::movddup(XMMRegister dst, XMMRegister src) { 2258 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 2259 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2260 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2261 attributes.set_rex_vex_w_reverted(); 2262 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2263 emit_int16(0x12, 0xC0 | encode); 2264 } 2265 2266 void Assembler::kmovbl(KRegister dst, Register src) { 2267 assert(VM_Version::supports_avx512dq(), ""); 2268 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2269 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2270 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2271 } 2272 2273 void Assembler::kmovbl(Register dst, KRegister src) { 2274 assert(VM_Version::supports_avx512dq(), ""); 2275 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2276 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2277 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2278 } 2279 2280 void Assembler::kmovwl(KRegister dst, Register src) { 2281 assert(VM_Version::supports_evex(), ""); 2282 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2283 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2284 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2285 } 2286 2287 void Assembler::kmovwl(Register dst, KRegister src) { 2288 assert(VM_Version::supports_evex(), ""); 2289 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2290 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2291 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2292 } 2293 2294 void Assembler::kmovwl(KRegister dst, Address src) { 2295 assert(VM_Version::supports_evex(), ""); 2296 InstructionMark im(this); 2297 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2298 vex_prefix(src, 0, 
dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2299 emit_int8((unsigned char)0x90); 2300 emit_operand((Register)dst, src); 2301 } 2302 2303 void Assembler::kmovdl(KRegister dst, Register src) { 2304 assert(VM_Version::supports_avx512bw(), ""); 2305 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2306 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2307 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2308 } 2309 2310 void Assembler::kmovdl(Register dst, KRegister src) { 2311 assert(VM_Version::supports_avx512bw(), ""); 2312 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2313 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2314 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2315 } 2316 2317 void Assembler::kmovql(KRegister dst, KRegister src) { 2318 assert(VM_Version::supports_avx512bw(), ""); 2319 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2320 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2321 emit_int16((unsigned char)0x90, (0xC0 | encode)); 2322 } 2323 2324 void Assembler::kmovql(KRegister dst, Address src) { 2325 assert(VM_Version::supports_avx512bw(), ""); 2326 InstructionMark im(this); 2327 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2328 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2329 emit_int8((unsigned char)0x90); 2330 emit_operand((Register)dst, src); 2331 } 2332 2333 void Assembler::kmovql(Address dst, KRegister src) { 2334 assert(VM_Version::supports_avx512bw(), ""); 2335 InstructionMark im(this); 2336 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2337 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2338 emit_int8((unsigned char)0x90); 2339 emit_operand((Register)src, dst); 2340 } 2341 2342 void Assembler::kmovql(KRegister dst, Register src) { 2343 assert(VM_Version::supports_avx512bw(), ""); 2344 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2345 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2346 emit_int16((unsigned char)0x92, (0xC0 | encode)); 2347 } 2348 2349 void Assembler::kmovql(Register dst, KRegister src) { 2350 assert(VM_Version::supports_avx512bw(), ""); 2351 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2352 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2353 emit_int16((unsigned char)0x93, (0xC0 | encode)); 2354 } 2355 2356 void Assembler::knotwl(KRegister dst, KRegister src) { 2357 assert(VM_Version::supports_evex(), ""); 2358 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2359 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2360 
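  // Opcode 0x44 in the 0F map with no SIMD prefix is KNOTW (bitwise NOT of a
  // 16-bit opmask). As an aside (per the Intel SDM, not encoded here): the
  // 66-prefixed form would be KNOTB, and VEX.W selects the D/Q widths.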
emit_int16(0x44, (0xC0 | encode)); 2361 } 2362 2363 // This instruction produces ZF or CF flags 2364 void Assembler::kortestbl(KRegister src1, KRegister src2) { 2365 assert(VM_Version::supports_avx512dq(), ""); 2366 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2367 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2368 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2369 } 2370 2371 // This instruction produces ZF or CF flags 2372 void Assembler::kortestwl(KRegister src1, KRegister src2) { 2373 assert(VM_Version::supports_evex(), ""); 2374 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2375 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2376 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2377 } 2378 2379 // This instruction produces ZF or CF flags 2380 void Assembler::kortestdl(KRegister src1, KRegister src2) { 2381 assert(VM_Version::supports_avx512bw(), ""); 2382 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2383 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2384 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2385 } 2386 2387 // This instruction produces ZF or CF flags 2388 void Assembler::kortestql(KRegister src1, KRegister src2) { 2389 assert(VM_Version::supports_avx512bw(), ""); 2390 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2391 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2392 emit_int16((unsigned char)0x98, (0xC0 | encode)); 2393 } 2394 2395 // This instruction produces ZF or CF flags 2396 void Assembler::ktestql(KRegister src1, KRegister src2) { 2397 assert(VM_Version::supports_avx512bw(), ""); 2398 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2399 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2400 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2401 } 2402 2403 void Assembler::ktestq(KRegister src1, KRegister src2) { 2404 assert(VM_Version::supports_avx512bw(), ""); 2405 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2406 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2407 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2408 } 2409 2410 void Assembler::ktestd(KRegister src1, KRegister src2) { 2411 assert(VM_Version::supports_avx512bw(), ""); 2412 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2413 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2414 emit_int16((unsigned char)0x99, (0xC0 | encode)); 2415 } 2416 2417 void Assembler::movb(Address dst, int imm8) { 2418 InstructionMark im(this); 2419 prefix(dst); 2420 emit_int8((unsigned char)0xC6); 2421 emit_operand(rax, dst, 1); 2422 emit_int8(imm8); 2423 } 2424 2425 2426 void 
Assembler::movb(Address dst, Register src) { 2427 assert(src->has_byte_register(), "must have byte register"); 2428 InstructionMark im(this); 2429 prefix(dst, src, true); 2430 emit_int8((unsigned char)0x88); 2431 emit_operand(src, dst); 2432 } 2433 2434 void Assembler::movdl(XMMRegister dst, Register src) { 2435 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2436 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2437 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2438 emit_int16(0x6E, (0xC0 | encode)); 2439 } 2440 2441 void Assembler::movdl(Register dst, XMMRegister src) { 2442 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2443 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2444 // swap src/dst to get correct prefix 2445 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2446 emit_int16(0x7E, (0xC0 | encode)); 2447 } 2448 2449 void Assembler::movdl(XMMRegister dst, Address src) { 2450 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2451 InstructionMark im(this); 2452 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2453 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2454 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2455 emit_int8(0x6E); 2456 emit_operand(dst, src); 2457 } 2458 2459 void Assembler::movdl(Address dst, XMMRegister src) { 2460 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2461 InstructionMark im(this); 2462 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2463 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2464 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2465 emit_int8(0x7E); 2466 emit_operand(src, dst); 2467 } 2468 2469 void Assembler::movdqa(XMMRegister dst, XMMRegister src) { 2470 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2471 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2472 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2473 emit_int16(0x6F, (0xC0 | encode)); 2474 } 2475 2476 void Assembler::movdqa(XMMRegister dst, Address src) { 2477 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2478 InstructionMark im(this); 2479 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2480 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2481 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2482 emit_int8(0x6F); 2483 emit_operand(dst, src); 2484 } 2485 2486 void Assembler::movdqu(XMMRegister dst, Address src) { 2487 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2488 InstructionMark im(this); 2489 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2490 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2491 simd_prefix(dst, xnoreg, src, 
VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2492 emit_int8(0x6F); 2493 emit_operand(dst, src); 2494 } 2495 2496 void Assembler::movdqu(XMMRegister dst, XMMRegister src) { 2497 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2498 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2499 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2500 emit_int16(0x6F, (0xC0 | encode)); 2501 } 2502 2503 void Assembler::movdqu(Address dst, XMMRegister src) { 2504 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2505 InstructionMark im(this); 2506 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2507 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2508 attributes.reset_is_clear_context(); 2509 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2510 emit_int8(0x7F); 2511 emit_operand(src, dst); 2512 } 2513 2514 // Move Unaligned 256bit Vector 2515 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2516 assert(UseAVX > 0, ""); 2517 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2518 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2519 emit_int16(0x6F, (0xC0 | encode)); 2520 } 2521 2522 void Assembler::vmovdqu(XMMRegister dst, Address src) { 2523 assert(UseAVX > 0, ""); 2524 InstructionMark im(this); 2525 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2526 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2527 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2528 emit_int8(0x6F); 2529 emit_operand(dst, src); 2530 } 2531 2532 void Assembler::vmovdqu(Address dst, XMMRegister src) { 2533 assert(UseAVX > 0, ""); 2534 InstructionMark im(this); 2535 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2536 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2537 attributes.reset_is_clear_context(); 2538 // swap src<->dst for encoding 2539 assert(src != xnoreg, "sanity"); 2540 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2541 emit_int8(0x7F); 2542 emit_operand(src, dst); 2543 } 2544 2545 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64) 2546 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { 2547 assert(VM_Version::supports_evex(), ""); 2548 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2549 attributes.set_is_evex_instruction(); 2550 int prefix = (_legacy_mode_bw) ? 
VEX_SIMD_F2 : VEX_SIMD_F3; 2551 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 2552 emit_int16(0x6F, (0xC0 | encode)); 2553 } 2554 2555 void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) { 2556 assert(VM_Version::supports_evex(), ""); 2557 InstructionMark im(this); 2558 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2559 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 2560 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2561 attributes.set_is_evex_instruction(); 2562 vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 2563 emit_int8(0x6F); 2564 emit_operand(dst, src); 2565 } 2566 2567 void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) { 2568 assert(VM_Version::supports_evex(), ""); 2569 assert(src != xnoreg, "sanity"); 2570 InstructionMark im(this); 2571 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2572 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 2573 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2574 attributes.set_is_evex_instruction(); 2575 vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 2576 emit_int8(0x7F); 2577 emit_operand(src, dst); 2578 } 2579 2580 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len) { 2581 assert(VM_Version::supports_avx512vlbw(), ""); 2582 InstructionMark im(this); 2583 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2584 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2585 attributes.set_embedded_opmask_register_specifier(mask); 2586 attributes.set_is_evex_instruction(); 2587 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2588 emit_int8(0x6F); 2589 emit_operand(dst, src); 2590 } 2591 2592 void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) { 2593 assert(VM_Version::supports_evex(), ""); 2594 InstructionMark im(this); 2595 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2596 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2597 attributes.set_is_evex_instruction(); 2598 int prefix = (_legacy_mode_bw) ? 
VEX_SIMD_F2 : VEX_SIMD_F3; 2599 vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 2600 emit_int8(0x6F); 2601 emit_operand(dst, src); 2602 } 2603 2604 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len) { 2605 assert(VM_Version::supports_avx512vlbw(), ""); 2606 InstructionMark im(this); 2607 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2608 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2609 attributes.set_embedded_opmask_register_specifier(mask); 2610 attributes.set_is_evex_instruction(); 2611 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2612 emit_int8(0x6F); 2613 emit_operand(dst, src); 2614 } 2615 2616 void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) { 2617 assert(VM_Version::supports_evex(), ""); 2618 assert(src != xnoreg, "sanity"); 2619 InstructionMark im(this); 2620 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2621 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2622 attributes.set_is_evex_instruction(); 2623 int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; 2624 vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); 2625 emit_int8(0x7F); 2626 emit_operand(src, dst); 2627 } 2628 2629 void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len) { 2630 assert(VM_Version::supports_avx512vlbw(), ""); 2631 assert(src != xnoreg, "sanity"); 2632 InstructionMark im(this); 2633 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2634 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2635 attributes.reset_is_clear_context(); 2636 attributes.set_embedded_opmask_register_specifier(mask); 2637 attributes.set_is_evex_instruction(); 2638 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2639 emit_int8(0x7F); 2640 emit_operand(src, dst); 2641 } 2642 2643 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 2644 assert(VM_Version::supports_evex(), ""); 2645 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2646 attributes.set_is_evex_instruction(); 2647 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2648 emit_int16(0x6F, (0xC0 | encode)); 2649 } 2650 2651 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) { 2652 assert(VM_Version::supports_evex(), ""); 2653 InstructionMark im(this); 2654 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ true); 2655 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2656 attributes.set_is_evex_instruction(); 2657 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2658 emit_int8(0x6F); 2659 emit_operand(dst, src); 2660 } 2661 2662 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) { 2663 assert(VM_Version::supports_evex(), ""); 2664 assert(src != xnoreg, "sanity"); 2665 
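  // Note on the store form below: masked stores to memory never use
  // zeroing-masking, which is presumably why every memory-destination move in
  // this family calls reset_is_clear_context() to drop the EVEX zeroing hint
  // and keep merge semantics.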
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (0xC0 | encode));
}

void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid a partial register
// stall when loading from memory. But for old Opteron, use movlpd instead of
// movsd. The selection is done in MacroAssembler::movdbl() and movflt().
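// A rough sketch of that selection (the real logic lives in
// MacroAssembler::movdbl in macroAssembler_x86.cpp; treat this as an
// approximation rather than a copy, with UseXmmLoadAndClearUpper being the VM
// flag that distinguishes the two cases):
//
//   if (UseXmmLoadAndClearUpper) {
//     movsd(dst, src);   // newer cores: 64-bit load that clears the upper half
//   } else {
//     movlpd(dst, src);  // old Opteron: merge into the low half, no stall
//   }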
2745 void Assembler::movlpd(XMMRegister dst, Address src) { 2746 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2747 InstructionMark im(this); 2748 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 2749 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2750 attributes.set_rex_vex_w_reverted(); 2751 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2752 emit_int8(0x12); 2753 emit_operand(dst, src); 2754 } 2755 2756 void Assembler::movq(XMMRegister dst, Address src) { 2757 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2758 InstructionMark im(this); 2759 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2760 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2761 attributes.set_rex_vex_w_reverted(); 2762 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2763 emit_int8(0x7E); 2764 emit_operand(dst, src); 2765 } 2766 2767 void Assembler::movq(Address dst, XMMRegister src) { 2768 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2769 InstructionMark im(this); 2770 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2771 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2772 attributes.set_rex_vex_w_reverted(); 2773 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2774 emit_int8((unsigned char)0xD6); 2775 emit_operand(src, dst); 2776 } 2777 2778 void Assembler::movsbl(Register dst, Address src) { // movsxb 2779 InstructionMark im(this); 2780 prefix(src, dst); 2781 emit_int16(0x0F, (unsigned char)0xBE); 2782 emit_operand(dst, src); 2783 } 2784 2785 void Assembler::movsbl(Register dst, Register src) { // movsxb 2786 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2787 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 2788 emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode)); 2789 } 2790 2791 void Assembler::movsd(XMMRegister dst, XMMRegister src) { 2792 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2793 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2794 attributes.set_rex_vex_w_reverted(); 2795 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2796 emit_int16(0x10, (0xC0 | encode)); 2797 } 2798 2799 void Assembler::movsd(XMMRegister dst, Address src) { 2800 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2801 InstructionMark im(this); 2802 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2803 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2804 attributes.set_rex_vex_w_reverted(); 2805 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2806 emit_int8(0x10); 2807 emit_operand(dst, src); 2808 } 2809 2810 void Assembler::movsd(Address dst, XMMRegister src) { 2811 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2812 InstructionMark im(this); 2813 InstructionAttr attributes(AVX_128bit, /* rex_w */ 
VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2814 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2815 attributes.reset_is_clear_context(); 2816 attributes.set_rex_vex_w_reverted(); 2817 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2818 emit_int8(0x11); 2819 emit_operand(src, dst); 2820 } 2821 2822 void Assembler::movss(XMMRegister dst, XMMRegister src) { 2823 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2824 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2825 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2826 emit_int16(0x10, (0xC0 | encode)); 2827 } 2828 2829 void Assembler::movss(XMMRegister dst, Address src) { 2830 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2831 InstructionMark im(this); 2832 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2833 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2834 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2835 emit_int8(0x10); 2836 emit_operand(dst, src); 2837 } 2838 2839 void Assembler::movss(Address dst, XMMRegister src) { 2840 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2841 InstructionMark im(this); 2842 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2843 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2844 attributes.reset_is_clear_context(); 2845 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2846 emit_int8(0x11); 2847 emit_operand(src, dst); 2848 } 2849 2850 void Assembler::movswl(Register dst, Address src) { // movsxw 2851 InstructionMark im(this); 2852 prefix(src, dst); 2853 emit_int16(0x0F, (unsigned char)0xBF); 2854 emit_operand(dst, src); 2855 } 2856 2857 void Assembler::movswl(Register dst, Register src) { // movsxw 2858 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2859 emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode)); 2860 } 2861 2862 void Assembler::movw(Address dst, int imm16) { 2863 InstructionMark im(this); 2864 2865 emit_int8(0x66); // switch to 16-bit mode 2866 prefix(dst); 2867 emit_int8((unsigned char)0xC7); 2868 emit_operand(rax, dst, 2); 2869 emit_int16(imm16); 2870 } 2871 2872 void Assembler::movw(Register dst, Address src) { 2873 InstructionMark im(this); 2874 emit_int8(0x66); 2875 prefix(src, dst); 2876 emit_int8((unsigned char)0x8B); 2877 emit_operand(dst, src); 2878 } 2879 2880 void Assembler::movw(Address dst, Register src) { 2881 InstructionMark im(this); 2882 emit_int8(0x66); 2883 prefix(dst, src); 2884 emit_int8((unsigned char)0x89); 2885 emit_operand(src, dst); 2886 } 2887 2888 void Assembler::movzbl(Register dst, Address src) { // movzxb 2889 InstructionMark im(this); 2890 prefix(src, dst); 2891 emit_int16(0x0F, (unsigned char)0xB6); 2892 emit_operand(dst, src); 2893 } 2894 2895 void Assembler::movzbl(Register dst, Register src) { // movzxb 2896 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2897 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 2898 emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode); 2899 } 2900 2901 void Assembler::movzwl(Register 
dst, Address src) { // movzxw 2902 InstructionMark im(this); 2903 prefix(src, dst); 2904 emit_int16(0x0F, (unsigned char)0xB7); 2905 emit_operand(dst, src); 2906 } 2907 2908 void Assembler::movzwl(Register dst, Register src) { // movzxw 2909 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2910 emit_int24(0x0F, (unsigned char)0xB7, 0xC0 | encode); 2911 } 2912 2913 void Assembler::mull(Address src) { 2914 InstructionMark im(this); 2915 prefix(src); 2916 emit_int8((unsigned char)0xF7); 2917 emit_operand(rsp, src); 2918 } 2919 2920 void Assembler::mull(Register src) { 2921 int encode = prefix_and_encode(src->encoding()); 2922 emit_int16((unsigned char)0xF7, (0xE0 | encode)); 2923 } 2924 2925 void Assembler::mulsd(XMMRegister dst, Address src) { 2926 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2927 InstructionMark im(this); 2928 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2929 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2930 attributes.set_rex_vex_w_reverted(); 2931 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2932 emit_int8(0x59); 2933 emit_operand(dst, src); 2934 } 2935 2936 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 2937 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2938 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2939 attributes.set_rex_vex_w_reverted(); 2940 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2941 emit_int16(0x59, (0xC0 | encode)); 2942 } 2943 2944 void Assembler::mulss(XMMRegister dst, Address src) { 2945 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2946 InstructionMark im(this); 2947 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2948 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2949 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2950 emit_int8(0x59); 2951 emit_operand(dst, src); 2952 } 2953 2954 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 2955 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2956 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2957 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2958 emit_int16(0x59, (0xC0 | encode)); 2959 } 2960 2961 void Assembler::negl(Register dst) { 2962 int encode = prefix_and_encode(dst->encoding()); 2963 emit_int16((unsigned char)0xF7, (0xD8 | encode)); 2964 } 2965 2966 void Assembler::nop(int i) { 2967 #ifdef ASSERT 2968 assert(i > 0, " "); 2969 // The fancy nops aren't currently recognized by debuggers making it a 2970 // pain to disassemble code while debugging. If asserts are on clearly 2971 // speed is not an issue so simply use the single byte traditional nop 2972 // to do alignment. 
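// For example (illustrative): nop(3) in a debug build emits 90 90 90 here,
// while a product build with UseAddressNop falls through to the patch-safe
// 66 66 90 sequence chosen by the switches below.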

void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers, making it a
  // pain to disassemble code while debugging. If asserts are on, speed is
  // clearly not an issue, so simply use the traditional single-byte nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest of the coding is Intel-specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while (i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int24(0x66, 0x66, 0x66);
      addr_nop_8();
      emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
        // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd_family()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest of the coding is AMD-specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while (i >= 22) {
      i -= 11;
      emit_int24(0x66, 0x66, 0x66);
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
        // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  if (UseAddressNop && VM_Version::is_zx()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for ZX
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest of the coding is ZX-specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while (i >= 15) {
      // For ZX don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int24(0x66, 0x66, 0x66);
      addr_nop_8();
      emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
        // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while (i > 12) {
    i -= 4;
    emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90);
  }
  // 1 - 12 nops
  if (i > 8) {
    if (i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int24(0x66, 0x66, (unsigned char)0x90);
  }
  // 1 - 8 nops
  if (i > 4) {
    if (i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int24(0x66, 0x66, (unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
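
// Worked example (added; the byte tables above are the authoritative
// reference): on an Intel CPU with UseAddressNop, nop(9) takes the case-9
// path and emits one 0x66 size prefix followed by addr_nop_8(), i.e.
// 66 0F 1F 84 00 00 00 00 00 -- a single 9-byte instruction that decodes in
// one slot, instead of nine 1-byte 0x90 nops.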

void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD0 | encode));
}

void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}

void Assembler::orb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rcx, dst, 1);
  emit_int8(imm8);
}

void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_operand(dst, src);
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x67, (0xC0 | encode));
}

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x67, (0xC0 | encode));
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x00, (0xC0 | encode), imm8);
}

void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX512F");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x36, (0xC0 | encode));
}

void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x46, (0xC0 | encode), imm8);
}

void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x06, (0xC0 | encode), imm8);
}

void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}
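
// Semantics note (added): for vperm2f128/vperm2i128 above, imm8 selects one
// 128-bit lane per destination half: bits 1:0 pick the lane for dst[127:0]
// (0/1 = nds low/high, 2/3 = src low/high), bits 5:4 do the same for
// dst[255:128], and bits 3 and 7 zero the corresponding half instead. For
// example, vperm2i128(dst, x, x, 0x01) swaps the two 128-bit halves of x.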

void Assembler::pause() {
  emit_int16((unsigned char)0xF3, (unsigned char)0x90);
}

void Assembler::ud2() {
  emit_int16(0x0F, 0x0B);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x61, (0xC0 | encode), imm8);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}

// In this context, kdst is written with the mask identifying the equal components
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x74, (0xC0 | encode));
}

void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src);
}
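
// Worked example (added): pcmpeqb compares byte lanes and writes 0xFF for
// equal lanes and 0x00 otherwise, e.g. {1,2,3,...} vs {1,9,3,...} leaves
// dst = {0xFF,0x00,0xFF,...}. The result is a byte mask; pmovmskb (further
// down) can then pack the lane MSBs into a GPR, the usual prelude to a
// scalar test-and-branch in vectorized compare loops.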

void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src);
}

void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x3E, (0xC0 | encode), vcc);
}

void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x3E, (0xC0 | encode), vcc);
}

void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_operand(as_Register(dst_enc), src);
  emit_int8(vcc);
}

void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(dst_enc), src);
}

void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(kdst->encoding()), src);
}
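
// Usage note (added): the evpcmp* overloads taking a KRegister mask are
// masked compares -- result bits whose mask bit is 0 are written as 0 in
// kdst -- and ComparisonPredicate vcc selects eq/lt/le/... for the unsigned
// word compares. A minimal sketch, assuming k1, xmm0 and xmm1 are live:
//
//   evpcmpuw(k2, k1, xmm0, xmm1, Assembler::le, Assembler::AVX_512bit);
//   // k2[i] = k1[i] ? (xmm0.word[i] <= xmm1.word[i]) : 0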

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

// In this context, kdst is written with the mask identifying the equal components
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x75, (0xC0 | encode));
}

void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

// In this context, kdst is written with the mask identifying the equal components
void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x76, (0xC0 | encode));
}

void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}

// In this context, kdst is written with the mask identifying the equal components
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x29, (0xC0 | encode));
}
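
// Encoding note (added): the set_address_attributes(EVEX_FV, ...) calls
// above drive EVEX compressed-disp8 handling: for a full-vector memory
// operand the 8-bit displacement is implicitly scaled by the operand size,
// so evpcmpeqd on a 512-bit source at [base + 128] can encode disp8 = 2
// (2 * 64 bytes) instead of falling back to a 4-byte disp32.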

// In this context, kdst is written with the mask identifying the equal components
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src);
}

void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD7, (0xC0 | encode));
}

void Assembler::vpmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD7, (0xC0 | encode));
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x16, (0xC0 | encode), imm8);
}

void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x16, (0xC0 | encode), imm8);
}

void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}
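
// Semantics note (added): pmovmskb/vpmovmskb gather the most significant
// bit of every byte lane into the low bits of a GPR (16 bits for XMM, 32
// for YMM), turning a vector compare into a scalar bitmask:
//
//   pcmpeqb(xmm0, xmm1);   // lanes become 0xFF / 0x00
//   pmovmskb(rax, xmm0);   // rax = 16-bit mask of equal lanes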

void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC5, (0xC0 | encode), imm8);
}

void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x15);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}

void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x22, (0xC0 | encode), imm8);
}

void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}
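
// Worked example (added): the extract/insert pairs give scalar access to
// vector lanes, with imm8 as the lane index. Assuming xmm0 holds packed
// 16-bit values:
//
//   pextrw(rax, xmm0, 3);  // rax = zero-extended word lane 3
//   pinsrw(xmm0, rbx, 3);  // lane 3 = low word of rbx, others unchanged
//
// (pinsrw is defined just below.)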

void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC4, (0xC0 | encode), imm8);
}

void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x20);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (0xC0 | encode));
}

void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x20, (0xC0 | encode));
}

void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x30, (0xC0 | encode));
}

void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x20, (0xC0 | encode));
}

void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst);
}

void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst);
}

void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x31);
  emit_operand(src, dst);
}
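
// Semantics note (added): the pmovzx*/pmovsx* family widens each source
// lane with zero- or sign-extension (bytes to words here, so only the low
// half of the source register is consumed), while evpmovwb/evpmovdb go the
// other way and truncate words or dwords to bytes on store. For example,
// vpmovzxbw with AVX_256bit reads 16 bytes and produces 16 zero-extended
// words in a YMM register.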

void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " ");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x33, (0xC0 | encode));
}

void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
          (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512_vnni(), "must support vnni");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x52, (0xC0 | encode));
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x55, (0xC0 | encode));
}
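
// Arithmetic note (added): pmaddwd multiplies corresponding signed 16-bit
// lanes and sums each adjacent pair of 32-bit products, so with words
// {a0,a1,a2,a3,...} and {b0,b1,b2,b3,...} result dword 0 is a0*b0 + a1*b1.
// evpdpwssd (AVX-512 VNNI) performs the same multiply-add but accumulates
// into dst, collapsing a pmaddwd + paddd pair into one instruction.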

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust the stack by 8 bytes on 64-bit platforms
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x0D);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefix(src);
  emit_int16(0x0F, 0x0D);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x00, (0xC0 | encode));
}

void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x00, (0xC0 | encode));
}
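
// Encoding note (added): the prefetch family shares opcode 0F 18 and uses
// the ModRM reg field as the locality hint, which is why each emitter
// passes a dummy GPR to emit_operand():
//
//   0F 18 /0 prefetchnta (rax)   0F 18 /2 prefetcht1 (rdx)
//   0F 18 /1 prefetcht0  (rcx)   0F 18 /3 prefetcht2 (rbx)
//
// prefetchr/prefetchw use the 3DNow!-derived opcode 0F 0D (/0 and /1).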

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_operand(dst, src);
}

void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         (vector_len == AVX_256bit? VM_Version::supports_avx2() :
          (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int24(0x70, (0xC0 | encode), mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
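
// Worked example (added): the pshufd mode byte packs four 2-bit source
// indices, lowest dword first: mode = 0x1B (0b00011011) reverses the four
// dwords and mode = 0x00 broadcasts dword 0 to all lanes. pshuflw applies
// the same selector to the four low words only, leaving the upper quadword
// untouched.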

void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
}

void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM3 is for /3 encoding: 66 0F 73 /3 ib
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift);
}

void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift);
}

void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}
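
// Worked example (added): psrldq/pslldq shift the full 128-bit value by
// whole bytes, not bits. psrldq(xmm0, 8) moves the upper quadword of xmm0
// into the lower quadword and zero-fills the rest -- the usual way to bring
// lane 1 of a 2 x 64-bit vector into scalar position.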

void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x60, (0xC0 | encode));
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}
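
// Semantics note (added): the punpckl* family interleaves the low halves of
// its two operands: punpcklbw on {a0,a1,...} and {b0,b1,...} yields
// {a0,b0,a1,b1,...}, punpckldq interleaves dwords, and punpcklqdq (below)
// concatenates the low quadwords. ptest, further up, is flag-only: it sets
// ZF when (dst AND src) == 0 and CF when (NOT dst AND src) == 0.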
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x17, (0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x60, (0xC0 | encode));
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x62, (0xC0 | encode));
}

void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6C, (0xC0 | encode));
}

void Assembler::push(int32_t imm32) {
  // in 64-bit mode we push a 64-bit slot onto the stack but only
  // take a 32-bit (sign-extended) immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif

void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
  }
}

void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x53, (0xC0 | encode));
}

void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x53, (0xC0 | encode));
}

void Assembler::rdtsc() {
  emit_int16(0x0F, 0x31);
}

// copies data from [esi] to [edi] using rcx pointer-sized words
// generic
void Assembler::rep_mov() {
  // REP
  // MOVSQ
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xA5);)
}
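// Encoding note for the string ops (illustrative): on LP64, rep_mov() above
// emits F3 48 A5 (REP prefix, REX.W, MOVS), so each iteration moves an
// 8-byte word and rcx counts qwords. The rep_stos*/repne_scan* variants
// below follow the same F3/F2 + REX.W pattern around opcodes AA/AB/AF.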
// fills rcx bytes at [edi] with the byte value in al
void Assembler::rep_stosb() {
  // REP
  // STOSB
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAA);)
}

// fills rcx pointer-sized words at [edi] with the value in rax
// generic
void Assembler::rep_stos() {
  // REP
  // LP64:STOSQ, LP32:STOSD
  LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);)
  NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAB);)
}

// scans rcx pointer-sized words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scan() {
  // SCASQ
  LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);)
  NOT_LP64( emit_int16((unsigned char)0xF2, (unsigned char)0xAF);)
}

#ifdef _LP64
// scans rcx 4-byte words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scanl() {
  // SCASL
  emit_int16((unsigned char)0xF2, (unsigned char)0xAF);
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xF8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x90 | cc, (0xC0 | encode));
}
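// Encoding sketch for setb (illustrative): the condition code is folded into
// the second opcode byte, 0F (90 | cc), and the destination byte register
// into the ModRM byte. For example, setb(Assembler::notZero, rax) emits
// 0F 95 C0.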
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0F, (0xC0 | encode), imm8);
}

void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0F, (0xC0 | encode), imm8);
}

void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x03, (0xC0 | encode), imm8);
}

void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0E, (0xC0 | encode), imm8);
}

void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
  emit_int24((unsigned char)0xCC, (0xC0 | encode), (unsigned char)imm8);
}

void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xC8, (0xC0 | encode));
}

void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xC9, (0xC0 | encode));
}

void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCA, (0xC0 | encode));
}
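// Note on the sha1*/sha256* group (illustrative): the SHA extensions are
// legacy-encoded (no VEX/EVEX prefix), which is why these emitters go through
// rex_prefix_and_encode() rather than the vex/evex paths. They are gated on
// VM_Version::supports_sha() and back the UseSHA intrinsics.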
// xmm0 is implicit additional source to this instruction.
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCB, (0xC0 | encode));
}

void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCC, (0xC0 | encode));
}

void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int16((unsigned char)0xCD, (0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE8 | encode));
}

void Assembler::shldl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int24(0x0F, (unsigned char)0xA5, (0xC0 | encode));
}

void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xA4, (0xC0 | encode), imm8);
}

void Assembler::shrdl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int24(0x0F, (unsigned char)0xAD, (0xC0 | encode));
}

void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_int32(0x0F, (unsigned char)0xAC, (0xC0 | encode), imm8);
}
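// Semantics sketch for the double shifts above (illustrative):
// shldl(dst, src, imm8) computes dst = (dst << imm8) | (src >> (32 - imm8)),
// i.e. bits vacated in dst are filled from the high end of src, while
// shrdl(dst, src, imm8) computes dst = (dst >> imm8) | (src << (32 - imm8)).
// src itself is not modified.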
// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x0B, (0xC0 | encode), (unsigned char)rmode);
}

void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_operand(dst, src);
  emit_int8((unsigned char)rmode);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::stmxcsr(Address dst) {
  if (UseAVX > 0) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst);
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(dst);
    emit_int16(0x0F, (unsigned char)0xAE);
    emit_operand(as_Register(3), dst);
  }
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4-byte immediate value even if it fits into 8 bits
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}
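// Why force the 4-byte form (illustrative rationale): emit_arith() picks the
// sign-extended 8-bit encoding (83 /5 ib) whenever the immediate fits in a
// byte, which changes the instruction length; subl_imm32() always emits
// 81 /5 id, keeping the site a fixed size. That matters when the immediate
// may be patched later or when code size must be known up front.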
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test does not support
  // sign-extension of 8-bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int16((unsigned char)0xF7, (0xC0 | encode));
  }
  emit_int32(imm32);
}
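// Encoding sketch for testl above (illustrative): rax gets the short
// accumulator form A9 id, every other register the general F7 /0 id form;
// e.g. testl(rcx, 0x10) emits F7 C1 10 00 00 00.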
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode));
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode));
}

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2E, (0xC0 | encode));
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2E, (0xC0 | encode));
}

void Assembler::xabort(int8_t imm8) {
  emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF));
}

void Assembler::xaddb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int16(0x0F, (unsigned char)0xC0);
  emit_operand(src, dst);
}

void Assembler::xaddw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int16(0x0F, (unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int16(0x0F, (unsigned char)0xC1);
  emit_operand(src, dst);
}
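// Usage sketch for the xadd family (illustrative; ctr_addr is an assumed
// Address of a 32-bit counter): XADD exchanges the operands and stores the
// sum, so combined with the lock prefix it is a fetch-and-add, leaving the
// old memory value in the source register:
//   __ lock();
//   __ xaddl(ctr_addr, rax);   // [ctr] += rax, old [ctr] -> rax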
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
    emit_int32(0);
  }
}

void Assembler::xchgb(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x86);
  emit_operand(dst, src);
}

void Assembler::xchgw(Register dst, Address src) { // xchg
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x87, (0xC0 | encode));
}

void Assembler::xend() {
  emit_int24(0x0F, 0x01, (unsigned char)0xD5);
}

void Assembler::xgetbv() {
  emit_int24(0x0F, 0x01, (unsigned char)0xD0);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src);
}

// AVX 3-operand scalar floating-point arithmetic instructions
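// Operand convention for the 3-operand scalar ops below (per AVX semantics):
// dst receives nds OP src in the low element, the remaining low-128 bits are
// copied from nds, and bits above 127 are zeroed. E.g.
// vaddsd(xmm0, xmm1, xmm2) computes xmm0[63:0] = xmm1[63:0] + xmm2[63:0]
// with xmm0[127:64] taken from xmm1.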
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}
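// The "231" in the FMA forms below names the operand order:
// vfmadd231sd/ss compute dst = src1 * src2 + dst, with a single rounding
// step (fused multiply-add), unlike a separate vmulsd followed by vaddsd.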
void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB9, (0xC0 | encode));
}

void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB9, (0xC0 | encode));
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic
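// vector_len selects the VEX.L / EVEX.L'L width for the packed ops in this
// section: AVX_128bit, AVX_256bit and AVX_512bit operate on 2, 4 and 8
// doubles (or 4, 8 and 16 floats) respectively; e.g.
// vaddpd(xmm0, xmm1, xmm2, Assembler::AVX_256bit) adds four packed doubles.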
void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::addpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5C, (0xC0 | encode));
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::mulpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2);
}

void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}
void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}
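// rmode for the vround*/vrndscale* ops below is the SSE4.1/AVX
// rounding-control immediate: when bit 2 is clear, the low two bits select
// the mode (0 = round to nearest even, 1 = round down, 2 = round up,
// 3 = truncate); with bit 2 set the current MXCSR rounding mode is used
// instead. vrndscalepd is the EVEX form used for 512-bit vectors.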
void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}

void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src);
  emit_int8((rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x09, (0xC0 | encode), (rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src);
  emit_int8((rmode));
}

void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x51, (0xC0 | encode));
}

void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}
void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (0xC0 | encode));
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}
void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x15, (0xC0 | encode));
}

void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x14, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (0xC0 | encode));
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

// Integer vector arithmetic
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (0xC0 | encode));
}

void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x02, (0xC0 | encode));
}
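// Illustrative semantics for the horizontal adds above (not emitted here):
// PHADDD sums horizontally adjacent element pairs, first across dst and then
// across src. For 128-bit operands, with the high element written first,
//   phaddd xmm0, xmm1 where xmm0 = [a3,a2,a1,a0], xmm1 = [b3,b2,b1,b0]
//   yields xmm0 = [b3+b2, b1+b0, a3+a2, a1+a0]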
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::paddd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (0xC0 | encode));
}

void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x02, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (0xC0 | encode));
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (0xC0 | encode));
}
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (0xC0 | encode));
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src);
}
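// Note: the byte/word Address forms above use tuple type EVEX_FVM (full
// vector memory, no embedded broadcast) because EVEX defines no 8- or 16-bit
// broadcast, while the dword/qword forms use EVEX_FV, which supports {1toN}
// broadcast and element-sized disp8 scaling when EVEX.b is set.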
void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (0xC0 | encode));
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (0xC0 | encode));
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (0xC0 | encode));
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (0xC0 | encode));
}
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (0xC0 | encode));
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
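// Note on the immediate-count shift forms below: opcode group 66 0F 71/72/73
// encodes the operation in the ModRM reg field rather than in a register
// operand, so the xmm2/xmm4/xmm6 arguments passed to *_prefix_and_encode are
// only reg-field selectors (/2 = shift right logical, /4 = shift right
// arithmetic, /6 = shift left). Illustrative encoding (not emitted by this
// comment):
//   psllw xmm1, 5  ->  66 0F 71 F1 05   ; ModRM 0xF1 = mod 11, reg /6, rm xmm1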
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}
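// Note: for the XMM-count forms above and below, hardware takes the shift
// count from the low 64 bits of the count register, and a count at or above
// the element width zero-fills the destination (or sign-fills it for the
// arithmetic shifts further down).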
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (0xC0 | encode));
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with the psrldq SSE2 instruction, which
  // shifts a 128-bit value in an xmm register by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (0xC0 | encode));
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (0xC0 | encode));
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (0xC0 | encode));
}

void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (0xC0 | encode));
}

void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (0xC0 | encode));
}
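// Note: unlike the forms above, which apply a single count to every element,
// evpsrlvw/evpsllvw (AVX512BW) shift each word element by the count held in
// the corresponding element of src; conceptually, per 16-bit lane,
//   dst[i] = nds[i] >> src[i]   (logical, for evpsrlvw)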
// Shift packed integers arithmetically right by specified number of bits.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (0xC0 | encode));
}
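// Note: SSE/AVX define no packed 64-bit arithmetic right shift; psraw/psrad
// stop at word/dword granularity. The qword form exists only as the
// EVEX-encoded evpsraq below, which is why it requires AVX512 (and AVX512VL
// for 128/256-bit vectors).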
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512VL");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (0xC0 | encode));
}

// Logical operations on packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);
}
void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (0xC0 | encode));
}

void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x71, (0xC0 | encode));
}

void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x73, (0xC0 | encode));
}

void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (0xC0 | encode));
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src);
}
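// Note on vpshldvd/vpshrdvd above: these are AVX512_VBMI2 funnel shifts; each
// dword lane is shifted across the concatenation of two source lanes, with
// per-lane counts taken from the third operand.
// Note on the q-suffixed forms (vpandq above, vporq below): EVEX has no
// untyped 512-bit PAND/POR/PXOR, only the dword/qword-typed variants, so
// 512-bit callers must use these.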
void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (0xC0 | encode));
}

void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (0xC0 | encode));
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
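// Usage sketch (illustrative, not part of this file's API surface): a caller
// zeroing a 512-bit register through this interface would typically emit an
// EVEX xor of the register with itself, e.g.
//   __ evpxorq(xmm0, xmm0, xmm0, Assembler::AVX_512bit);
// which recent hardware generally recognizes as a dependency-breaking
// zeroing idiom.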
// vinserti forms

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // last byte:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x38, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x38, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}

void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int24(0x3A, (0xC0 | encode), imm8 & 0x01);
}
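// imm8 worked example (illustrative): with 512-bit operands,
//   vinserti32x4(zmm0, zmm1, xmm2, 3)
// copies zmm1 to zmm0 and then replaces bits 511:384 (lane q3) with xmm2;
// the other three 128-bit lanes pass through from zmm1 unchanged.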
// vinsertf forms

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x18, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x18, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}


// vextracti forms

void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int24(0x39, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
}
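// Zeroing-masking cannot be used when the destination is a memory operand
// (EVEX.z must be 0 for stores), which is why the Address-destination
// extract forms here call reset_is_clear_context() before emitting the prefix.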
void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x39, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x3B, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
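// For example, vextracti64x4(dst, src, 1) above copies bits 511:256 of src
// into the lower 256 bits of dst; imm8 = 0 selects bits 255:0 instead.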

// vextractf forms

void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x1B, (0xC0 | encode), imm8 & 0x01);
}
void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x78, (0xC0 | encode));
}

void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_operand(dst, src);
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x79, (0xC0 | encode));
}

void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src);
}
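// Note on the "swap src<->dst for encoding" idiom used by the load forms in
// this file: the XMM destination lands in ModRM.reg while the Address supplies
// ModRM.rm/SIB, so vex_prefix() is handed the destination register where a
// source register would normally go.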
// xmm/mem sourced byte/word/dword/qword replicate

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX >= 2, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x58, (0xC0 | encode));
}

void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x59, (0xC0 | encode));
}

void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x5A, (0xC0 | encode));
}

void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}
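// Sketch of the expected bytes, assuming the VEX path is selected (registers
// in xmm0-15, vector_len != AVX_512bit): vpbroadcastd(xmm1, xmm0, AVX_256bit)
// should assemble as VEX.256.66.0F38.W0 58 /r = C4 E2 7D 58 C8.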
// scalar single/double precision replicate

// duplicate single precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x18, (0xC0 | encode));
}

void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
}

// duplicate double precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x19, (0xC0 | encode));
}

void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_operand(dst, src);
}


// gpr source broadcast forms

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7A, (0xC0 | encode));
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7B, (0xC0 | encode));
}
// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (0xC0 | encode));
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (0xC0 | encode));
}

void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src);
}
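// In the pclmulqdq family below, the "mask" immediate selects which quadwords
// are multiplied: imm8 bit 0 picks the low or high quadword of the first
// operand and bit 4 that of the second, so e.g. 0x11 multiplies the two
// upper quadwords.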
// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
  assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (0xC0 | encode), (unsigned char)mask);
}

void Assembler::vzeroupper_uncached() {
  if (VM_Version::supports_vzeroupper()) {
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8(0x77);
  }
}

#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int16(0x0F, 0x77);
}

void Assembler::vzeroupper() {
  vzeroupper_uncached();
}

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int16((unsigned char)0x81, (0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and, if they are equal, stores rcx:rbx into adr; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int16(0x0F, (unsigned char)0xC7);
  emit_operand(rcx, adr);
}
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

// 64bit doesn't use the x87

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int16(b1, b2 + i);
}
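// For example, emit_farith(0xD8, 0xC0, i) below emits the two-byte sequence
// D8 C0+i, i.e. FADD ST(0), ST(i); the i < 8 assert keeps the second byte
// from overflowing into the next opcode group.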
void Assembler::fabs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int16((unsigned char)0xDE, (unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);  // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);  // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEE);
}

void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int16((unsigned char)0x9B, (unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int16((unsigned char)0xDF, (unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);  // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);  // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
  emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE4);
}
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEA);
}
#endif // !_LP64

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  int vector_len = _attributes->get_vector_len();
  bool vex_w = _attributes->is_rex_vex_w();
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;

    emit_int24((unsigned char)VEX_3bytes, byte1, byte2);
  } else {
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0) ? 4 : 0) | pre;
    emit_int16((unsigned char)VEX_2bytes, byte1);
  }
}
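// By my reading of the branch above: an instruction needing none of
// REX.B/X/W and living in the 0F map takes the 2-byte C5 form, e.g.
// vmaxss(xmm1, xmm2, xmm3) -> C5 EA 5F CB; anything in the 0F38/0F3A maps,
// or needing those bits, takes the 3-byte C4 form.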
// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  // EVEX 0x62 prefix
  // byte1 = EVEX_4bytes;

  bool vex_w = _attributes->is_rex_vex_w();
  int evex_encoding = (vex_w ? VEX_W : 0);
  // EVEX.b is not currently used for broadcast of single element or data rounding modes
  _attributes->set_evex_encoding(evex_encoding);

  // P0: byte 2, laid out as RXBR`00mm; the register-extension bits are
  // stored not'd (inverted), hence the (~byte2) below
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;

  // P2: byte 4 as zL'Lbv'aaa
  // kregs are implemented in the low 3 bits as aaa
  int byte4 = (_attributes->is_no_reg_mask()) ?
              0 :
              _attributes->get_embedded_opmask_register_specifier();
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((_attributes->get_vector_len()) & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (_attributes->is_no_reg_mask() == false) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }

  emit_int32(EVEX_4bytes, byte2, byte3, byte4);
}
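// Worked example of the four bytes above: evpbroadcastq(xmm0, rax, AVX_512bit)
// is EVEX.512.66.0F38.W1 7C /r and, by my reading, emits
// 62 F2 FD 48 7C C0 (P0 = 0xF2, P1 = 0xFD, P2 = 0x48, ModRM = 0xC0).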
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (xreg_enc & 8) == 8;
  bool vex_b = adr.base_needs_rex();
  bool vex_x;
  if (adr.isxmmindex()) {
    vex_x = adr.xmmindex_needs_rex();
  } else {
    vex_x = adr.index_needs_rex();
  }
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX instruction (which is not marked as a pure EVEX instruction) check and see if this
  // instruction is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v;
    // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
    if (adr.isxmmindex()) {
      evex_v = ((adr._xmmindex->encoding() > 15) ? true : false);
    } else {
      evex_v = (nds_enc >= 16);
    }
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (dst_enc & 8) == 8;
  bool vex_b = (src_enc & 8) == 8;
  bool vex_x = false;
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX instruction (which is not marked as a pure EVEX instruction) check and see if this
  // instruction is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
          (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false
    // Instructions with uses_vl true are vector instructions
    // All the vector instructions with AVX_512bit length can have legacy_mode as false
    // All the vector instructions with < AVX_512bit length can have legacy_mode as false if AVX512vl() is supported
    // All others should have legacy_mode set to true
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    // Instructions with legacy_mode true should have dst, nds and src < 16
    assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}
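// The value returned above is only the reg/rm portion of the ModRM byte;
// register-register callers OR it with 0xC0 (mod = 11) before emitting,
// as in emit_int16(opcode, 0xC0 | encode) throughout this file.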
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            VexOpcode opc, InstructionAttr *attributes) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      VexOpcode opc, InstructionAttr *attributes) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
  }
}

void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (0xC0 | encode));
}

void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (0xC0 | encode));
}

void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
}

void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int src2_enc = src2->encoding();
  emit_int24(0x4B, (0xC0 | encode), (0xF0 & src2_enc << 4));
}
void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
}

void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  int src2_enc = src2->encoding();
  emit_int24(0x4A, (0xC0 | encode), (0xF0 & src2_enc << 4));
}

void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x02, (0xC0 | encode), (unsigned char)imm8);
}

void Assembler::shlxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

void Assembler::shlxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF7, (0xC0 | encode));
}

#ifndef _LP64
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int24(0x0F, (unsigned char)0x95, (0xE0 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x95, (0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// addressing; it cannot be used by instructions that want an immediate value.
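// reachable() below answers whether a rip-relative disp32 can reach the
// target: the displacement is a signed 32-bit offset from the end of the
// instruction, so the target must lie within +/-2GB of code that could end
// up anywhere in the code cache, hence the checks against both
// CodeCache::low_bound() and high_bound().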
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  relocInfo::relocType relocType = adr.reloc();

  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (relocType == relocInfo::none) {
    return false;
  }
  if (relocType == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (relocType == relocInfo::virtual_call_type ||
      relocType == relocInfo::opt_virtual_call_type ||
      relocType == relocInfo::static_call_type ||
      relocType == relocInfo::static_stub_type) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (relocType != relocInfo::external_word_type &&
      relocType != relocInfo::poll_return_type &&  // these are really external_word but need special
      relocType != relocInfo::poll_type &&         // relocs to identify them
      relocType != relocInfo::runtime_call_type) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be a runtime call reloc; see if it is in the code cache.
    // Flipping stuff in the code cache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the code cache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int8_t Assembler::get_prefixq(Address adr) {
  int8_t prfx = get_prefixq(adr, rax);
  assert(REX_W <= prfx && prfx <= REX_WXB, "must be");
  return prfx;
}

int8_t Assembler::get_prefixq(Address adr, Register src) {
  int8_t prfx = (int8_t)(REX_W +
                         ((int)adr.base_needs_rex()) +
                         ((int)adr.index_needs_rex() << 1) +
                         ((int)(src->encoding() >= 8) << 2));
#ifdef ASSERT
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WXB, "must be");
      } else {
        assert(prfx == REX_WB, "must be");
      }
    } else {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WX, "must be");
      } else {
        assert(prfx == REX_W, "must be");
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WRXB, "must be");
      } else {
        assert(prfx == REX_WRB, "must be");
      }
    } else {
      if (adr.index_needs_rex()) {
        assert(prfx == REX_WRX, "must be");
      } else {
        assert(prfx == REX_WR, "must be");
      }
    }
  }
#endif
  return prfx;
}

void Assembler::prefixq(Address adr) {
  emit_int8(get_prefixq(adr));
}

void Assembler::prefixq(Address adr, Register src) {
  emit_int8(get_prefixq(adr, src));
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
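
// REX is 0100WRXB: W selects 64-bit operand size, while R, X and B extend the
// ModRM.reg, SIB.index and ModRM.rm/base register fields to reach r8..r15.
// For example (a worked case, not from the original source):
// get_prefixq(Address(r12, r9, Address::times_8), r10) sets all three low
// bits and yields REX_WRXB (0x4F).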

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::adcxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F,
             0x38,
             (unsigned char)0xF6,
             (0xC0 | encode));
}

void Assembler::adoxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F,
             0x38,
             (unsigned char)0xF6,
             (0xC0 | encode));
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int16(0x0F, (0xC8 | encode));
}

void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::cdqq() {
  emit_int16(REX_W, (unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  assert(VM_Version::supports_clflush(), "should do");
  prefix(adr);
  emit_int16(0x0F, (unsigned char)0xAE);
  emit_operand(rdi, adr);
}

void Assembler::clflushopt(Address adr) {
  assert(VM_Version::supports_clflushopt(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0F 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 7 == rdi
  emit_operand(rdi, adr);
}

void Assembler::clwb(Address adr) {
  assert(VM_Version::supports_clwb(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0F 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 6 == rsi
  emit_operand(rsi, adr);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (0x40 | cc), (0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (0x40 | cc));
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x3B);
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
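  // REX.W 0F B1 /r: CMPXCHG r/m64, r64 compares RAX with the memory operand;
  // callers that need atomicity must emit a LOCK prefix separately.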
  emit_int24(get_prefixq(adr, reg), 0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // F2 REX.W 0F 2C /r
  // CVTTSD2SI r64, xmm1/m64
  InstructionMark im(this);
  emit_int32((unsigned char)0xF2, REX_W, 0x0F, 0x2C);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, 0xC8 | encode);
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
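  // DEC r/m64 is REX.W FF /1; the rcx operand below (encoding 1) supplies
  // the /1 opcode extension.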
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::xrstor(Address src) {
  emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(5), src);
}

void Assembler::fxsave(Address dst) {
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::xsave(Address dst) {
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(4), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xAF, (0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
  } else {
    emit_int16(0x69, (0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
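  // INC r/m64 is REX.W FF /0; rax (encoding 0) supplies the /0 opcode extension.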
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int16((unsigned char)0x81, (0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (0xC0 | encode));
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), (unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBE, (0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use it until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xC7 | encode);
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16(0x63, (0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB6, (0xC0 | encode));
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB7, (0xC0 | encode));
}

void Assembler::mulq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (0xE0 | encode));
}

void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF6, (0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (0xD0 | encode));
}

void Assembler::btsq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xBA);
  emit_operand(rbp /* 5 */, dst, 1);
  emit_int8(imm8);
}

void Assembler::btrq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xBA);
  emit_operand(rsi /* 6 */, dst, 1);
  emit_int8(imm8);
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int32((unsigned char)0xF3,
             get_prefixq(src, dst),
             0x0F,
             (unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB8, (0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x8F);
  emit_operand(rax, dst);
}

// Precomputable: popa, pusha, vzeroupper

// The results of these routines are invariant from one invocation to another
// invocation for the duration of a run. Caching the result on bootstrap
// and copying it out on subsequent invocations can thus be beneficial.
static bool precomputed = false;

static u_char* popa_code = NULL;
static int popa_len = 0;

static u_char* pusha_code = NULL;
static int pusha_len = 0;

static u_char* vzup_code = NULL;
static int vzup_len = 0;

void Assembler::precompute_instructions() {
  assert(!Universe::is_fully_initialized(), "must still be single threaded");
  guarantee(!precomputed, "only once");
  precomputed = true;
  ResourceMark rm;

  // Make a temporary buffer big enough for the routines we're capturing
  int size = 256;
  char* tmp_code = NEW_RESOURCE_ARRAY(char, size);
  CodeBuffer buffer((address)tmp_code, size);
  MacroAssembler masm(&buffer);

  address begin_popa = masm.code_section()->end();
  masm.popa_uncached();
  address end_popa = masm.code_section()->end();
  masm.pusha_uncached();
  address end_pusha = masm.code_section()->end();
  masm.vzeroupper_uncached();
  address end_vzup = masm.code_section()->end();

  // Save the instructions to permanent buffers.
  popa_len = (int)(end_popa - begin_popa);
  popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal);
  memcpy(popa_code, begin_popa, popa_len);

  pusha_len = (int)(end_pusha - end_popa);
  pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal);
  memcpy(pusha_code, end_popa, pusha_len);

  vzup_len = (int)(end_vzup - end_pusha);
  if (vzup_len > 0) {
    vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal);
    memcpy(vzup_code, end_pusha, vzup_len);
  } else {
    vzup_code = pusha_code; // dummy
  }

  assert(masm.code()->total_oop_size() == 0 &&
         masm.code()->total_metadata_size() == 0 &&
         masm.code()->total_relocation_size() == 0,
         "pre-computed code can't reference oops, metadata or contain relocations");
}

static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
  assert(src != NULL, "code to copy must have been pre-computed");
  assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
  address end = code_section->end();
  memcpy(end, src, src_len);
  code_section->set_end(end + src_len);
}

void Assembler::popa() { // 64bit
  emit_copy(code_section(), popa_code, popa_len);
}

void Assembler::popa_uncached() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9, Address(rsp, 6 * wordSize));
  movq(r8, Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

void Assembler::pusha() { // 64bit
  emit_copy(code_section(), pusha_code, pusha_len);
}

void Assembler::pusha_uncached() { // 64bit
  // we have to store original rsp. ABI says that 128 bytes
  // below rsp are local scratch.
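  // Note: rsp is saved at [rsp - 5*wordSize], i.e. slot 11 of the 16-word
  // frame allocated below, which is exactly the slot popa_uncached() skips
  // when restoring.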
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::vzeroupper() {
  emit_copy(code_section(), vzup_code, vzup_len);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xD8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xC8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
}

void Assembler::rorxd(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (0xC0 | encode), imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xF8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, 0xE8 | encode);
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int16(REX_W, (unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int16((unsigned char)0xF7, (0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(dst, src), 0x0F, (unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x87, (0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x33);
  emit_operand(dst, src);
}

#endif // !LP64