/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/assembler.inline.hpp" 28 #include "gc/shared/cardTableBarrierSet.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "prims/methodHandles.hpp" 33 #include "runtime/biasedLocking.hpp" 34 #include "runtime/objectMonitor.hpp" 35 #include "runtime/os.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/stubRoutines.hpp" 38 #include "utilities/macros.hpp" 39 40 #ifdef PRODUCT 41 #define BLOCK_COMMENT(str) /* nothing */ 42 #define STOP(error) stop(error) 43 #else 44 #define BLOCK_COMMENT(str) block_comment(str) 45 #define STOP(error) block_comment(error); stop(error) 46 #endif 47 48 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 49 // Implementation of AddressLiteral 50 51 // A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms. 52 unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = { 53 // -----------------Table 4.5 -------------------- // 54 16, 32, 64, // EVEX_FV(0) 55 4, 4, 4, // EVEX_FV(1) - with Evex.b 56 16, 32, 64, // EVEX_FV(2) - with Evex.w 57 8, 8, 8, // EVEX_FV(3) - with Evex.w and Evex.b 58 8, 16, 32, // EVEX_HV(0) 59 4, 4, 4, // EVEX_HV(1) - with Evex.b 60 // -----------------Table 4.6 -------------------- // 61 16, 32, 64, // EVEX_FVM(0) 62 1, 1, 1, // EVEX_T1S(0) 63 2, 2, 2, // EVEX_T1S(1) 64 4, 4, 4, // EVEX_T1S(2) 65 8, 8, 8, // EVEX_T1S(3) 66 4, 4, 4, // EVEX_T1F(0) 67 8, 8, 8, // EVEX_T1F(1) 68 8, 8, 8, // EVEX_T2(0) 69 0, 16, 16, // EVEX_T2(1) 70 0, 16, 16, // EVEX_T4(0) 71 0, 0, 32, // EVEX_T4(1) 72 0, 0, 32, // EVEX_T8(0) 73 8, 16, 32, // EVEX_HVM(0) 74 4, 8, 16, // EVEX_QVM(0) 75 2, 4, 8, // EVEX_OVM(0) 76 16, 16, 16, // EVEX_M128(0) 77 8, 32, 64, // EVEX_DUP(0) 78 0, 0, 0 // EVEX_NTUP 79 }; 80 81 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { 82 
_is_lval = false; 83 _target = target; 84 switch (rtype) { 85 case relocInfo::oop_type: 86 case relocInfo::metadata_type: 87 // Oops are a special case. Normally they would be their own section 88 // but in cases like icBuffer they are literals in the code stream that 89 // we don't have a section for. We use none so that we get a literal address 90 // which is always patchable. 91 break; 92 case relocInfo::external_word_type: 93 _rspec = external_word_Relocation::spec(target); 94 break; 95 case relocInfo::internal_word_type: 96 _rspec = internal_word_Relocation::spec(target); 97 break; 98 case relocInfo::opt_virtual_call_type: 99 _rspec = opt_virtual_call_Relocation::spec(); 100 break; 101 case relocInfo::static_call_type: 102 _rspec = static_call_Relocation::spec(); 103 break; 104 case relocInfo::runtime_call_type: 105 _rspec = runtime_call_Relocation::spec(); 106 break; 107 case relocInfo::poll_type: 108 case relocInfo::poll_return_type: 109 _rspec = Relocation::spec_simple(rtype); 110 break; 111 case relocInfo::none: 112 break; 113 default: 114 ShouldNotReachHere(); 115 break; 116 } 117 } 118 119 // Implementation of Address 120 121 #ifdef _LP64 122 123 Address Address::make_array(ArrayAddress adr) { 124 // Not implementable on 64bit machines 125 // Should have been handled higher up the call chain. 
126 ShouldNotReachHere(); 127 return Address(); 128 } 129 130 // exceedingly dangerous constructor 131 Address::Address(int disp, address loc, relocInfo::relocType rtype) { 132 _base = noreg; 133 _index = noreg; 134 _scale = no_scale; 135 _disp = disp; 136 _xmmindex = xnoreg; 137 _isxmmindex = false; 138 switch (rtype) { 139 case relocInfo::external_word_type: 140 _rspec = external_word_Relocation::spec(loc); 141 break; 142 case relocInfo::internal_word_type: 143 _rspec = internal_word_Relocation::spec(loc); 144 break; 145 case relocInfo::runtime_call_type: 146 // HMM 147 _rspec = runtime_call_Relocation::spec(); 148 break; 149 case relocInfo::poll_type: 150 case relocInfo::poll_return_type: 151 _rspec = Relocation::spec_simple(rtype); 152 break; 153 case relocInfo::none: 154 break; 155 default: 156 ShouldNotReachHere(); 157 } 158 } 159 #else // LP64 160 161 Address Address::make_array(ArrayAddress adr) { 162 AddressLiteral base = adr.base(); 163 Address index = adr.index(); 164 assert(index._disp == 0, "must not have disp"); // maybe it can? 165 Address array(index._base, index._index, index._scale, (intptr_t) base.target()); 166 array._rspec = base._rspec; 167 return array; 168 } 169 170 // exceedingly dangerous constructor 171 Address::Address(address loc, RelocationHolder spec) { 172 _base = noreg; 173 _index = noreg; 174 _scale = no_scale; 175 _disp = (intptr_t) loc; 176 _rspec = spec; 177 _xmmindex = xnoreg; 178 _isxmmindex = false; 179 } 180 181 #endif // _LP64 182 183 184 185 // Convert the raw encoding form into the form expected by the constructor for 186 // Address. An index of 4 (rsp) corresponds to having no index, so convert 187 // that to noreg for the Address constructor. 
188 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) { 189 RelocationHolder rspec; 190 if (disp_reloc != relocInfo::none) { 191 rspec = Relocation::spec_simple(disp_reloc); 192 } 193 bool valid_index = index != rsp->encoding(); 194 if (valid_index) { 195 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); 196 madr._rspec = rspec; 197 return madr; 198 } else { 199 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); 200 madr._rspec = rspec; 201 return madr; 202 } 203 } 204 205 // Implementation of Assembler 206 207 int AbstractAssembler::code_fill_byte() { 208 return (u_char)'\xF4'; // hlt 209 } 210 211 // make this go away someday 212 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { 213 if (rtype == relocInfo::none) 214 emit_int32(data); 215 else 216 emit_data(data, Relocation::spec_simple(rtype), format); 217 } 218 219 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { 220 assert(imm_operand == 0, "default format must be immediate in this file"); 221 assert(inst_mark() != NULL, "must be inside InstructionMark"); 222 if (rspec.type() != relocInfo::none) { 223 #ifdef ASSERT 224 check_relocation(rspec, format); 225 #endif 226 // Do not use AbstractAssembler::relocate, which is not intended for 227 // embedded words. Instead, relocate to the enclosing instruction. 228 229 // hack. 
call32 is too wide for mask so use disp32 230 if (format == call32_operand) 231 code_section()->relocate(inst_mark(), rspec, disp32_operand); 232 else 233 code_section()->relocate(inst_mark(), rspec, format); 234 } 235 emit_int32(data); 236 } 237 238 static int encode(Register r) { 239 int enc = r->encoding(); 240 if (enc >= 8) { 241 enc -= 8; 242 } 243 return enc; 244 } 245 246 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { 247 assert(dst->has_byte_register(), "must have byte register"); 248 assert(isByte(op1) && isByte(op2), "wrong opcode"); 249 assert(isByte(imm8), "not a byte"); 250 assert((op1 & 0x01) == 0, "should be 8bit operation"); 251 emit_int8(op1); 252 emit_int8(op2 | encode(dst)); 253 emit_int8(imm8); 254 } 255 256 257 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { 258 assert(isByte(op1) && isByte(op2), "wrong opcode"); 259 assert((op1 & 0x01) == 1, "should be 32bit operation"); 260 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 261 if (is8bit(imm32)) { 262 emit_int8(op1 | 0x02); // set sign bit 263 emit_int8(op2 | encode(dst)); 264 emit_int8(imm32 & 0xFF); 265 } else { 266 emit_int8(op1); 267 emit_int8(op2 | encode(dst)); 268 emit_int32(imm32); 269 } 270 } 271 272 // Force generation of a 4 byte immediate value even if it fits into 8bit 273 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { 274 assert(isByte(op1) && isByte(op2), "wrong opcode"); 275 assert((op1 & 0x01) == 1, "should be 32bit operation"); 276 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 277 emit_int8(op1); 278 emit_int8(op2 | encode(dst)); 279 emit_int32(imm32); 280 } 281 282 // immediate-to-memory forms 283 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { 284 assert((op1 & 0x01) == 1, "should be 32bit operation"); 285 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 286 if (is8bit(imm32)) { 287 
emit_int8(op1 | 0x02); // set sign bit 288 emit_operand(rm, adr, 1); 289 emit_int8(imm32 & 0xFF); 290 } else { 291 emit_int8(op1); 292 emit_operand(rm, adr, 4); 293 emit_int32(imm32); 294 } 295 } 296 297 298 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { 299 assert(isByte(op1) && isByte(op2), "wrong opcode"); 300 emit_int8(op1); 301 emit_int8(op2 | encode(dst) << 3 | encode(src)); 302 } 303 304 305 bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, 306 int cur_tuple_type, int in_size_in_bits, int cur_encoding) { 307 int mod_idx = 0; 308 // We will test if the displacement fits the compressed format and if so 309 // apply the compression to the displacment iff the result is8bit. 310 if (VM_Version::supports_evex() && is_evex_inst) { 311 switch (cur_tuple_type) { 312 case EVEX_FV: 313 if ((cur_encoding & VEX_W) == VEX_W) { 314 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 315 } else { 316 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 317 } 318 break; 319 320 case EVEX_HV: 321 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 322 break; 323 324 case EVEX_FVM: 325 break; 326 327 case EVEX_T1S: 328 switch (in_size_in_bits) { 329 case EVEX_8bit: 330 break; 331 332 case EVEX_16bit: 333 mod_idx = 1; 334 break; 335 336 case EVEX_32bit: 337 mod_idx = 2; 338 break; 339 340 case EVEX_64bit: 341 mod_idx = 3; 342 break; 343 } 344 break; 345 346 case EVEX_T1F: 347 case EVEX_T2: 348 case EVEX_T4: 349 mod_idx = (in_size_in_bits == EVEX_64bit) ? 
1 : 0; 350 break; 351 352 case EVEX_T8: 353 break; 354 355 case EVEX_HVM: 356 break; 357 358 case EVEX_QVM: 359 break; 360 361 case EVEX_OVM: 362 break; 363 364 case EVEX_M128: 365 break; 366 367 case EVEX_DUP: 368 break; 369 370 default: 371 assert(0, "no valid evex tuple_table entry"); 372 break; 373 } 374 375 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 376 int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len]; 377 if ((disp % disp_factor) == 0) { 378 int new_disp = disp / disp_factor; 379 if ((-0x80 <= new_disp && new_disp < 0x80)) { 380 disp = new_disp; 381 } 382 } else { 383 return false; 384 } 385 } 386 } 387 return (-0x80 <= disp && disp < 0x80); 388 } 389 390 391 bool Assembler::emit_compressed_disp_byte(int &disp) { 392 int mod_idx = 0; 393 // We will test if the displacement fits the compressed format and if so 394 // apply the compression to the displacment iff the result is8bit. 395 if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) { 396 int evex_encoding = _attributes->get_evex_encoding(); 397 int tuple_type = _attributes->get_tuple_type(); 398 switch (tuple_type) { 399 case EVEX_FV: 400 if ((evex_encoding & VEX_W) == VEX_W) { 401 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 402 } else { 403 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 404 } 405 break; 406 407 case EVEX_HV: 408 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 409 break; 410 411 case EVEX_FVM: 412 break; 413 414 case EVEX_T1S: 415 switch (_attributes->get_input_size()) { 416 case EVEX_8bit: 417 break; 418 419 case EVEX_16bit: 420 mod_idx = 1; 421 break; 422 423 case EVEX_32bit: 424 mod_idx = 2; 425 break; 426 427 case EVEX_64bit: 428 mod_idx = 3; 429 break; 430 } 431 break; 432 433 case EVEX_T1F: 434 case EVEX_T2: 435 case EVEX_T4: 436 mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 
1 : 0; 437 break; 438 439 case EVEX_T8: 440 break; 441 442 case EVEX_HVM: 443 break; 444 445 case EVEX_QVM: 446 break; 447 448 case EVEX_OVM: 449 break; 450 451 case EVEX_M128: 452 break; 453 454 case EVEX_DUP: 455 break; 456 457 default: 458 assert(0, "no valid evex tuple_table entry"); 459 break; 460 } 461 462 int vector_len = _attributes->get_vector_len(); 463 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 464 int disp_factor = tuple_table[tuple_type + mod_idx][vector_len]; 465 if ((disp % disp_factor) == 0) { 466 int new_disp = disp / disp_factor; 467 if (is8bit(new_disp)) { 468 disp = new_disp; 469 } 470 } else { 471 return false; 472 } 473 } 474 } 475 return is8bit(disp); 476 } 477 478 479 void Assembler::emit_operand(Register reg, Register base, Register index, 480 Address::ScaleFactor scale, int disp, 481 RelocationHolder const& rspec, 482 int rip_relative_correction) { 483 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); 484 485 // Encode the registers as needed in the fields they are used in 486 487 int regenc = encode(reg) << 3; 488 int indexenc = index->is_valid() ? encode(index) << 3 : 0; 489 int baseenc = base->is_valid() ? 
encode(base) : 0; 490 491 if (base->is_valid()) { 492 if (index->is_valid()) { 493 assert(scale != Address::no_scale, "inconsistent address"); 494 // [base + index*scale + disp] 495 if (disp == 0 && rtype == relocInfo::none && 496 base != rbp LP64_ONLY(&& base != r13)) { 497 // [base + index*scale] 498 // [00 reg 100][ss index base] 499 assert(index != rsp, "illegal addressing mode"); 500 emit_int8(0x04 | regenc); 501 emit_int8(scale << 6 | indexenc | baseenc); 502 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 503 // [base + index*scale + imm8] 504 // [01 reg 100][ss index base] imm8 505 assert(index != rsp, "illegal addressing mode"); 506 emit_int8(0x44 | regenc); 507 emit_int8(scale << 6 | indexenc | baseenc); 508 emit_int8(disp & 0xFF); 509 } else { 510 // [base + index*scale + disp32] 511 // [10 reg 100][ss index base] disp32 512 assert(index != rsp, "illegal addressing mode"); 513 emit_int8(0x84 | regenc); 514 emit_int8(scale << 6 | indexenc | baseenc); 515 emit_data(disp, rspec, disp32_operand); 516 } 517 } else if (base == rsp LP64_ONLY(|| base == r12)) { 518 // [rsp + disp] 519 if (disp == 0 && rtype == relocInfo::none) { 520 // [rsp] 521 // [00 reg 100][00 100 100] 522 emit_int8(0x04 | regenc); 523 emit_int8(0x24); 524 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 525 // [rsp + imm8] 526 // [01 reg 100][00 100 100] disp8 527 emit_int8(0x44 | regenc); 528 emit_int8(0x24); 529 emit_int8(disp & 0xFF); 530 } else { 531 // [rsp + imm32] 532 // [10 reg 100][00 100 100] disp32 533 emit_int8(0x84 | regenc); 534 emit_int8(0x24); 535 emit_data(disp, rspec, disp32_operand); 536 } 537 } else { 538 // [base + disp] 539 assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); 540 if (disp == 0 && rtype == relocInfo::none && 541 base != rbp LP64_ONLY(&& base != r13)) { 542 // [base] 543 // [00 reg base] 544 emit_int8(0x00 | regenc | baseenc); 545 } else if (emit_compressed_disp_byte(disp) && rtype 
== relocInfo::none) { 546 // [base + disp8] 547 // [01 reg base] disp8 548 emit_int8(0x40 | regenc | baseenc); 549 emit_int8(disp & 0xFF); 550 } else { 551 // [base + disp32] 552 // [10 reg base] disp32 553 emit_int8(0x80 | regenc | baseenc); 554 emit_data(disp, rspec, disp32_operand); 555 } 556 } 557 } else { 558 if (index->is_valid()) { 559 assert(scale != Address::no_scale, "inconsistent address"); 560 // [index*scale + disp] 561 // [00 reg 100][ss index 101] disp32 562 assert(index != rsp, "illegal addressing mode"); 563 emit_int8(0x04 | regenc); 564 emit_int8(scale << 6 | indexenc | 0x05); 565 emit_data(disp, rspec, disp32_operand); 566 } else if (rtype != relocInfo::none ) { 567 // [disp] (64bit) RIP-RELATIVE (32bit) abs 568 // [00 000 101] disp32 569 570 emit_int8(0x05 | regenc); 571 // Note that the RIP-rel. correction applies to the generated 572 // disp field, but _not_ to the target address in the rspec. 573 574 // disp was created by converting the target address minus the pc 575 // at the start of the instruction. That needs more correction here. 
576 // intptr_t disp = target - next_ip; 577 assert(inst_mark() != NULL, "must be inside InstructionMark"); 578 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; 579 int64_t adjusted = disp; 580 // Do rip-rel adjustment for 64bit 581 LP64_ONLY(adjusted -= (next_ip - inst_mark())); 582 assert(is_simm32(adjusted), 583 "must be 32bit offset (RIP relative address)"); 584 emit_data((int32_t) adjusted, rspec, disp32_operand); 585 586 } else { 587 // 32bit never did this, did everything as the rip-rel/disp code above 588 // [disp] ABSOLUTE 589 // [00 reg 100][00 100 101] disp32 590 emit_int8(0x04 | regenc); 591 emit_int8(0x25); 592 emit_data(disp, rspec, disp32_operand); 593 } 594 } 595 } 596 597 void Assembler::emit_operand(XMMRegister reg, Register base, Register index, 598 Address::ScaleFactor scale, int disp, 599 RelocationHolder const& rspec) { 600 if (UseAVX > 2) { 601 int xreg_enc = reg->encoding(); 602 if (xreg_enc > 15) { 603 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf); 604 emit_operand((Register)new_reg, base, index, scale, disp, rspec); 605 return; 606 } 607 } 608 emit_operand((Register)reg, base, index, scale, disp, rspec); 609 } 610 611 void Assembler::emit_operand(XMMRegister reg, Register base, XMMRegister index, 612 Address::ScaleFactor scale, int disp, 613 RelocationHolder const& rspec) { 614 if (UseAVX > 2) { 615 int xreg_enc = reg->encoding(); 616 int xmmindex_enc = index->encoding(); 617 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf); 618 XMMRegister new_index = as_XMMRegister(xmmindex_enc & 0xf); 619 emit_operand((Register)new_reg, base, (Register)new_index, scale, disp, rspec); 620 } else { 621 emit_operand((Register)reg, base, (Register)index, scale, disp, rspec); 622 } 623 } 624 625 626 // Secret local extension to Assembler::WhichOperand: 627 #define end_pc_operand (_WhichOperand_limit) 628 629 address Assembler::locate_operand(address inst, WhichOperand which) { 630 // Decode the given instruction, and return the 
address of 631 // an embedded 32-bit operand word. 632 633 // If "which" is disp32_operand, selects the displacement portion 634 // of an effective address specifier. 635 // If "which" is imm64_operand, selects the trailing immediate constant. 636 // If "which" is call32_operand, selects the displacement of a call or jump. 637 // Caller is responsible for ensuring that there is such an operand, 638 // and that it is 32/64 bits wide. 639 640 // If "which" is end_pc_operand, find the end of the instruction. 641 642 address ip = inst; 643 bool is_64bit = false; 644 645 debug_only(bool has_disp32 = false); 646 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn 647 648 again_after_prefix: 649 switch (0xFF & *ip++) { 650 651 // These convenience macros generate groups of "case" labels for the switch. 652 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 653 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ 654 case (x)+4: case (x)+5: case (x)+6: case (x)+7 655 #define REP16(x) REP8((x)+0): \ 656 case REP8((x)+8) 657 658 case CS_segment: 659 case SS_segment: 660 case DS_segment: 661 case ES_segment: 662 case FS_segment: 663 case GS_segment: 664 // Seems dubious 665 LP64_ONLY(assert(false, "shouldn't have that prefix")); 666 assert(ip == inst+1, "only one prefix allowed"); 667 goto again_after_prefix; 668 669 case 0x67: 670 case REX: 671 case REX_B: 672 case REX_X: 673 case REX_XB: 674 case REX_R: 675 case REX_RB: 676 case REX_RX: 677 case REX_RXB: 678 NOT_LP64(assert(false, "64bit prefixes")); 679 goto again_after_prefix; 680 681 case REX_W: 682 case REX_WB: 683 case REX_WX: 684 case REX_WXB: 685 case REX_WR: 686 case REX_WRB: 687 case REX_WRX: 688 case REX_WRXB: 689 NOT_LP64(assert(false, "64bit prefixes")); 690 is_64bit = true; 691 goto again_after_prefix; 692 693 case 0xFF: // pushq a; decl a; incl a; call a; jmp a 694 case 0x88: // movb a, r 695 case 0x89: // movl a, r 696 case 0x8A: // movb r, a 697 case 0x8B: // movl r, a 698 
case 0x8F: // popl a 699 debug_only(has_disp32 = true); 700 break; 701 702 case 0x68: // pushq #32 703 if (which == end_pc_operand) { 704 return ip + 4; 705 } 706 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate"); 707 return ip; // not produced by emit_operand 708 709 case 0x66: // movw ... (size prefix) 710 again_after_size_prefix2: 711 switch (0xFF & *ip++) { 712 case REX: 713 case REX_B: 714 case REX_X: 715 case REX_XB: 716 case REX_R: 717 case REX_RB: 718 case REX_RX: 719 case REX_RXB: 720 case REX_W: 721 case REX_WB: 722 case REX_WX: 723 case REX_WXB: 724 case REX_WR: 725 case REX_WRB: 726 case REX_WRX: 727 case REX_WRXB: 728 NOT_LP64(assert(false, "64bit prefix found")); 729 goto again_after_size_prefix2; 730 case 0x8B: // movw r, a 731 case 0x89: // movw a, r 732 debug_only(has_disp32 = true); 733 break; 734 case 0xC7: // movw a, #16 735 debug_only(has_disp32 = true); 736 tail_size = 2; // the imm16 737 break; 738 case 0x0F: // several SSE/SSE2 variants 739 ip--; // reparse the 0x0F 740 goto again_after_prefix; 741 default: 742 ShouldNotReachHere(); 743 } 744 break; 745 746 case REP8(0xB8): // movl/q r, #32/#64(oop?) 747 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); 748 // these asserts are somewhat nonsensical 749 #ifndef _LP64 750 assert(which == imm_operand || which == disp32_operand, 751 "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)); 752 #else 753 assert((which == call32_operand || which == imm_operand) && is_64bit || 754 which == narrow_oop_operand && !is_64bit, 755 "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)); 756 #endif // _LP64 757 return ip; 758 759 case 0x69: // imul r, a, #32 760 case 0xC7: // movl a, #32(oop?) 761 tail_size = 4; 762 debug_only(has_disp32 = true); // has both kinds of operands! 763 break; 764 765 case 0x0F: // movx..., etc. 
766 switch (0xFF & *ip++) { 767 case 0x3A: // pcmpestri 768 tail_size = 1; 769 case 0x38: // ptest, pmovzxbw 770 ip++; // skip opcode 771 debug_only(has_disp32 = true); // has both kinds of operands! 772 break; 773 774 case 0x70: // pshufd r, r/a, #8 775 debug_only(has_disp32 = true); // has both kinds of operands! 776 case 0x73: // psrldq r, #8 777 tail_size = 1; 778 break; 779 780 case 0x12: // movlps 781 case 0x28: // movaps 782 case 0x2E: // ucomiss 783 case 0x2F: // comiss 784 case 0x54: // andps 785 case 0x55: // andnps 786 case 0x56: // orps 787 case 0x57: // xorps 788 case 0x58: // addpd 789 case 0x59: // mulpd 790 case 0x6E: // movd 791 case 0x7E: // movd 792 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush 793 case 0xFE: // paddd 794 debug_only(has_disp32 = true); 795 break; 796 797 case 0xAD: // shrd r, a, %cl 798 case 0xAF: // imul r, a 799 case 0xBE: // movsbl r, a (movsxb) 800 case 0xBF: // movswl r, a (movsxw) 801 case 0xB6: // movzbl r, a (movzxb) 802 case 0xB7: // movzwl r, a (movzxw) 803 case REP16(0x40): // cmovl cc, r, a 804 case 0xB0: // cmpxchgb 805 case 0xB1: // cmpxchg 806 case 0xC1: // xaddl 807 case 0xC7: // cmpxchg8 808 case REP16(0x90): // setcc a 809 debug_only(has_disp32 = true); 810 // fall out of the switch to decode the address 811 break; 812 813 case 0xC4: // pinsrw r, a, #8 814 debug_only(has_disp32 = true); 815 case 0xC5: // pextrw r, r, #8 816 tail_size = 1; // the imm8 817 break; 818 819 case 0xAC: // shrd r, a, #8 820 debug_only(has_disp32 = true); 821 tail_size = 1; // the imm8 822 break; 823 824 case REP16(0x80): // jcc rdisp32 825 if (which == end_pc_operand) return ip + 4; 826 assert(which == call32_operand, "jcc has no disp32 or imm"); 827 return ip; 828 default: 829 ShouldNotReachHere(); 830 } 831 break; 832 833 case 0x81: // addl a, #32; addl r, #32 834 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 835 // on 32bit in the case of cmpl, the imm might be an oop 836 tail_size = 4; 837 debug_only(has_disp32 = 
true); // has both kinds of operands! 838 break; 839 840 case 0x83: // addl a, #8; addl r, #8 841 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 842 debug_only(has_disp32 = true); // has both kinds of operands! 843 tail_size = 1; 844 break; 845 846 case 0x9B: 847 switch (0xFF & *ip++) { 848 case 0xD9: // fnstcw a 849 debug_only(has_disp32 = true); 850 break; 851 default: 852 ShouldNotReachHere(); 853 } 854 break; 855 856 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a 857 case REP4(0x10): // adc... 858 case REP4(0x20): // and... 859 case REP4(0x30): // xor... 860 case REP4(0x08): // or... 861 case REP4(0x18): // sbb... 862 case REP4(0x28): // sub... 863 case 0xF7: // mull a 864 case 0x8D: // lea r, a 865 case 0x87: // xchg r, a 866 case REP4(0x38): // cmp... 867 case 0x85: // test r, a 868 debug_only(has_disp32 = true); // has both kinds of operands! 869 break; 870 871 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 872 case 0xC6: // movb a, #8 873 case 0x80: // cmpb a, #8 874 case 0x6B: // imul r, a, #8 875 debug_only(has_disp32 = true); // has both kinds of operands! 876 tail_size = 1; // the imm8 877 break; 878 879 case 0xC4: // VEX_3bytes 880 case 0xC5: // VEX_2bytes 881 assert((UseAVX > 0), "shouldn't have VEX prefix"); 882 assert(ip == inst+1, "no prefixes allowed"); 883 // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions 884 // but they have prefix 0x0F and processed when 0x0F processed above. 885 // 886 // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES 887 // instructions (these instructions are not supported in 64-bit mode). 888 // To distinguish them bits [7:6] are set in the VEX second byte since 889 // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set 890 // those VEX bits REX and vvvv bits are inverted. 891 // 892 // Fortunately C2 doesn't generate these instructions so we don't need 893 // to check for them in product version. 
894 895 // Check second byte 896 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions")); 897 898 int vex_opcode; 899 // First byte 900 if ((0xFF & *inst) == VEX_3bytes) { 901 vex_opcode = VEX_OPCODE_MASK & *ip; 902 ip++; // third byte 903 is_64bit = ((VEX_W & *ip) == VEX_W); 904 } else { 905 vex_opcode = VEX_OPCODE_0F; 906 } 907 ip++; // opcode 908 // To find the end of instruction (which == end_pc_operand). 909 switch (vex_opcode) { 910 case VEX_OPCODE_0F: 911 switch (0xFF & *ip) { 912 case 0x70: // pshufd r, r/a, #8 913 case 0x71: // ps[rl|ra|ll]w r, #8 914 case 0x72: // ps[rl|ra|ll]d r, #8 915 case 0x73: // ps[rl|ra|ll]q r, #8 916 case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8 917 case 0xC4: // pinsrw r, r, r/a, #8 918 case 0xC5: // pextrw r/a, r, #8 919 case 0xC6: // shufp[s|d] r, r, r/a, #8 920 tail_size = 1; // the imm8 921 break; 922 } 923 break; 924 case VEX_OPCODE_0F_3A: 925 tail_size = 1; 926 break; 927 } 928 ip++; // skip opcode 929 debug_only(has_disp32 = true); // has both kinds of operands! 930 break; 931 932 case 0x62: // EVEX_4bytes 933 assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix"); 934 assert(ip == inst+1, "no prefixes allowed"); 935 // no EVEX collisions, all instructions that have 0x62 opcodes 936 // have EVEX versions and are subopcodes of 0x66 937 ip++; // skip P0 and exmaine W in P1 938 is_64bit = ((VEX_W & *ip) == VEX_W); 939 ip++; // move to P2 940 ip++; // skip P2, move to opcode 941 // To find the end of instruction (which == end_pc_operand). 942 switch (0xFF & *ip) { 943 case 0x22: // pinsrd r, r/a, #8 944 case 0x61: // pcmpestri r, r/a, #8 945 case 0x70: // pshufd r, r/a, #8 946 case 0x73: // psrldq r, #8 947 case 0x1f: // evpcmpd/evpcmpq 948 case 0x3f: // evpcmpb/evpcmpw 949 tail_size = 1; // the imm8 950 break; 951 default: 952 break; 953 } 954 ip++; // skip opcode 955 debug_only(has_disp32 = true); // has both kinds of operands! 
956 break; 957 958 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 959 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl 960 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a 961 case 0xDD: // fld_d a; fst_d a; fstp_d a 962 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a 963 case 0xDF: // fild_d a; fistp_d a 964 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a 965 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a 966 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a 967 debug_only(has_disp32 = true); 968 break; 969 970 case 0xE8: // call rdisp32 971 case 0xE9: // jmp rdisp32 972 if (which == end_pc_operand) return ip + 4; 973 assert(which == call32_operand, "call has no disp32 or imm"); 974 return ip; 975 976 case 0xF0: // Lock 977 assert(os::is_MP(), "only on MP"); 978 goto again_after_prefix; 979 980 case 0xF3: // For SSE 981 case 0xF2: // For SSE2 982 switch (0xFF & *ip++) { 983 case REX: 984 case REX_B: 985 case REX_X: 986 case REX_XB: 987 case REX_R: 988 case REX_RB: 989 case REX_RX: 990 case REX_RXB: 991 case REX_W: 992 case REX_WB: 993 case REX_WX: 994 case REX_WXB: 995 case REX_WR: 996 case REX_WRB: 997 case REX_WRX: 998 case REX_WRXB: 999 NOT_LP64(assert(false, "found 64bit prefix")); 1000 ip++; 1001 default: 1002 ip++; 1003 } 1004 debug_only(has_disp32 = true); // has both kinds of operands! 
1005 break; 1006 1007 default: 1008 ShouldNotReachHere(); 1009 1010 #undef REP8 1011 #undef REP16 1012 } 1013 1014 assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); 1015 #ifdef _LP64 1016 assert(which != imm_operand, "instruction is not a movq reg, imm64"); 1017 #else 1018 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field"); 1019 assert(which != imm_operand || has_disp32, "instruction has no imm32 field"); 1020 #endif // LP64 1021 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); 1022 1023 // parse the output of emit_operand 1024 int op2 = 0xFF & *ip++; 1025 int base = op2 & 0x07; 1026 int op3 = -1; 1027 const int b100 = 4; 1028 const int b101 = 5; 1029 if (base == b100 && (op2 >> 6) != 3) { 1030 op3 = 0xFF & *ip++; 1031 base = op3 & 0x07; // refetch the base 1032 } 1033 // now ip points at the disp (if any) 1034 1035 switch (op2 >> 6) { 1036 case 0: 1037 // [00 reg 100][ss index base] 1038 // [00 reg 100][00 100 esp] 1039 // [00 reg base] 1040 // [00 reg 100][ss index 101][disp32] 1041 // [00 reg 101] [disp32] 1042 1043 if (base == b101) { 1044 if (which == disp32_operand) 1045 return ip; // caller wants the disp32 1046 ip += 4; // skip the disp32 1047 } 1048 break; 1049 1050 case 1: 1051 // [01 reg 100][ss index base][disp8] 1052 // [01 reg 100][00 100 esp][disp8] 1053 // [01 reg base] [disp8] 1054 ip += 1; // skip the disp8 1055 break; 1056 1057 case 2: 1058 // [10 reg 100][ss index base][disp32] 1059 // [10 reg 100][00 100 esp][disp32] 1060 // [10 reg base] [disp32] 1061 if (which == disp32_operand) 1062 return ip; // caller wants the disp32 1063 ip += 4; // skip the disp32 1064 break; 1065 1066 case 3: 1067 // [11 reg base] (not a memory addressing mode) 1068 break; 1069 } 1070 1071 if (which == end_pc_operand) { 1072 return ip + tail_size; 1073 } 1074 1075 #ifdef _LP64 1076 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32"); 1077 #else 
  // (tail of Assembler::locate_operand) 32-bit build: the only remaining operand kind here is imm.
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

// Returns the address of the first byte after the instruction at 'inst'.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
// Debug-only sanity check: verifies that a relocation recorded with the given
// format really points at the operand position that locate_operand() computes
// for the instruction begun at inst_mark().
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    // Nothing to check for a non-relocation.
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  // The operand must end exactly at the current emit position.
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

// Emits a memory operand restricted to the low eight registers (no REX prefix involved).
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// General memory operand. rip_relative_correction accounts for bytes emitted
// after the displacement field (e.g. a trailing immediate) when the address
// is RIP-relative.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

// Memory operand for an XMM destination; selects the xmm-indexed (vector index)
// form when the address carries an xmm index register.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  if (adr.isxmmindex()) {
     emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec);
  } else {
     emit_operand(reg, adr._base,
                  adr._index, adr._scale, adr._disp,
                  adr._rspec);
  }
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


// Emits a two-byte x87 instruction; 'i' selects the FPU stack slot ST(i)
// folded into the second opcode byte.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

// ADC m32, imm32 (0x81; rdx supplies the opcode extension).
void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

// ADC m32, r32 (0x11 /r).
void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

// ADC r32, imm32.
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

// ADC r32, m32 (0x13 /r).
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

// ADC r32, r32.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

// ADD m32, imm32 (0x81; rax supplies the opcode extension).
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

// ADD m8, imm8 (0x80; trailing-immediate size 1 passed to emit_operand).
void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

// NOTE(review): no 0x66 operand-size prefix is emitted for this register form,
// unlike addw(Address, int) below — presumably callers only rely on the low
// 16 bits; confirm against call sites.
void Assembler::addw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

// ADD m16, imm16 (0x66 prefix + 0x81; trailing-immediate size 2).
void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

// ADD m32, r32 (0x01 /r).
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

// ADD r32, imm32.
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

// ADD r32, m32 (0x03 /r).
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

// ADD r32, r32.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44);  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00);  // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
emit_int8(0x1F); 1265 emit_int8((unsigned char)0x80); 1266 // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); 1267 emit_int32(0); // 32-bits offset (4 bytes) 1268 } 1269 1270 void Assembler::addr_nop_8() { 1271 assert(UseAddressNop, "no CPU support"); 1272 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset 1273 emit_int8(0x0F); 1274 emit_int8(0x1F); 1275 emit_int8((unsigned char)0x84); 1276 // emit_rm(cbuf, 0x2, EAX_enc, 0x4); 1277 emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1278 emit_int32(0); // 32-bits offset (4 bytes) 1279 } 1280 1281 void Assembler::addsd(XMMRegister dst, XMMRegister src) { 1282 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1283 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1284 attributes.set_rex_vex_w_reverted(); 1285 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1286 emit_int8(0x58); 1287 emit_int8((unsigned char)(0xC0 | encode)); 1288 } 1289 1290 void Assembler::addsd(XMMRegister dst, Address src) { 1291 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1292 InstructionMark im(this); 1293 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1294 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1295 attributes.set_rex_vex_w_reverted(); 1296 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1297 emit_int8(0x58); 1298 emit_operand(dst, src); 1299 } 1300 1301 void Assembler::addss(XMMRegister dst, XMMRegister src) { 1302 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1303 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1304 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1305 
emit_int8(0x58); 1306 emit_int8((unsigned char)(0xC0 | encode)); 1307 } 1308 1309 void Assembler::addss(XMMRegister dst, Address src) { 1310 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1311 InstructionMark im(this); 1312 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1313 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1314 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1315 emit_int8(0x58); 1316 emit_operand(dst, src); 1317 } 1318 1319 void Assembler::aesdec(XMMRegister dst, Address src) { 1320 assert(VM_Version::supports_aes(), ""); 1321 InstructionMark im(this); 1322 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1323 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1324 emit_int8((unsigned char)0xDE); 1325 emit_operand(dst, src); 1326 } 1327 1328 void Assembler::aesdec(XMMRegister dst, XMMRegister src) { 1329 assert(VM_Version::supports_aes(), ""); 1330 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1331 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1332 emit_int8((unsigned char)0xDE); 1333 emit_int8(0xC0 | encode); 1334 } 1335 1336 void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1337 assert(VM_Version::supports_vaes(), ""); 1338 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1339 attributes.set_is_evex_instruction(); 1340 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1341 emit_int8((unsigned char)0xDE); 1342 emit_int8((unsigned char)(0xC0 | encode)); 1343 } 1344 
1345 1346 void Assembler::aesdeclast(XMMRegister dst, Address src) { 1347 assert(VM_Version::supports_aes(), ""); 1348 InstructionMark im(this); 1349 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1350 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1351 emit_int8((unsigned char)0xDF); 1352 emit_operand(dst, src); 1353 } 1354 1355 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { 1356 assert(VM_Version::supports_aes(), ""); 1357 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1358 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1359 emit_int8((unsigned char)0xDF); 1360 emit_int8((unsigned char)(0xC0 | encode)); 1361 } 1362 1363 void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1364 assert(VM_Version::supports_vaes(), ""); 1365 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1366 attributes.set_is_evex_instruction(); 1367 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1368 emit_int8((unsigned char)0xDF); 1369 emit_int8((unsigned char)(0xC0 | encode)); 1370 } 1371 1372 void Assembler::aesenc(XMMRegister dst, Address src) { 1373 assert(VM_Version::supports_aes(), ""); 1374 InstructionMark im(this); 1375 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1376 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1377 emit_int8((unsigned char)0xDC); 1378 emit_operand(dst, src); 1379 } 1380 1381 void Assembler::aesenc(XMMRegister dst, XMMRegister src) { 1382 assert(VM_Version::supports_aes(), ""); 1383 InstructionAttr 
attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1384 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1385 emit_int8((unsigned char)0xDC); 1386 emit_int8(0xC0 | encode); 1387 } 1388 1389 void Assembler::aesenclast(XMMRegister dst, Address src) { 1390 assert(VM_Version::supports_aes(), ""); 1391 InstructionMark im(this); 1392 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1393 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1394 emit_int8((unsigned char)0xDD); 1395 emit_operand(dst, src); 1396 } 1397 1398 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { 1399 assert(VM_Version::supports_aes(), ""); 1400 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 1401 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1402 emit_int8((unsigned char)0xDD); 1403 emit_int8((unsigned char)(0xC0 | encode)); 1404 } 1405 1406 void Assembler::andw(Register dst, Register src) { 1407 (void)prefix_and_encode(dst->encoding(), src->encoding()); 1408 emit_arith(0x23, 0xC0, dst, src); 1409 } 1410 1411 void Assembler::andl(Address dst, int32_t imm32) { 1412 InstructionMark im(this); 1413 prefix(dst); 1414 emit_int8((unsigned char)0x81); 1415 emit_operand(rsp, dst, 4); 1416 emit_int32(imm32); 1417 } 1418 1419 void Assembler::andl(Register dst, int32_t imm32) { 1420 prefix(dst); 1421 emit_arith(0x81, 0xE0, dst, imm32); 1422 } 1423 1424 void Assembler::andl(Register dst, Address src) { 1425 InstructionMark im(this); 1426 prefix(src, dst); 1427 emit_int8(0x23); 1428 emit_operand(dst, src); 1429 } 1430 1431 void Assembler::andl(Register dst, Register src) { 1432 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1433 emit_arith(0x23, 0xC0, dst, 
src);
}

// ANDN r32, r32, r32 (BMI1, VEX.NDS 0F 38 F2 /r): dst = ~src1 & src2.
void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

// ANDN r32, r32, m32 (BMI1).
void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

// BSF r32, r32 (0F BC /r): bit scan forward.
void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BSR r32, r32 (0F BD /r): bit scan reverse.
void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

// BLSI r32, r/m32 (BMI1, 0F 38 F3; rbx encodes the opcode extension).
void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BLSI r32, m32 (BMI1).
void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

// BLSMSK r32, r/m32 (BMI1; rdx encodes the opcode extension).
void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BLSMSK r32, m32 (BMI1).
void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

// BLSR r32, r/m32 (BMI1; rcx encodes the opcode extension).
void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg
 */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BLSR r32, m32 (BMI1).
void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

// CALL rel32 to a label; emits a patch record when the label is not yet bound.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;  // total length of CALL rel32
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");  // bound labels are always backward
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

// CALL r32/r64 (FF /2).
void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}


// CALL m (FF /2; rdx encodes the opcode extension).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

// CALL rel32 to an absolute entry point, with relocation info.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  // Entry is NULL in case of a scratch emit.
  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

// CDQ: sign-extend eax into edx.
void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

// CLD: clear the direction flag.
void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

// CMOVcc r32, r32 (0F 40+cc /r).
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}


// CMOVcc r32, m32.
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

// CMP m8, imm8 (0x80; rdi encodes the opcode extension).
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

// CMP m32, imm32 (0x81 /7).
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

// CMP r32, imm32.
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

// CMP r32, r32 (0x3B /r).
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// CMP r32, m32.
void Assembler::cmpl(Register dst, Address  src) {
  InstructionMark im(this);
prefix(src, dst); 1627 emit_int8((unsigned char)0x3B); 1628 emit_operand(dst, src); 1629 } 1630 1631 void Assembler::cmpw(Address dst, int imm16) { 1632 InstructionMark im(this); 1633 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); 1634 emit_int8(0x66); 1635 emit_int8((unsigned char)0x81); 1636 emit_operand(rdi, dst, 2); 1637 emit_int16(imm16); 1638 } 1639 1640 // The 32-bit cmpxchg compares the value at adr with the contents of rax, 1641 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1642 // The ZF is set if the compared values were equal, and cleared otherwise. 1643 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg 1644 InstructionMark im(this); 1645 prefix(adr, reg); 1646 emit_int8(0x0F); 1647 emit_int8((unsigned char)0xB1); 1648 emit_operand(reg, adr); 1649 } 1650 1651 // The 8-bit cmpxchg compares the value at adr with the contents of rax, 1652 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1653 // The ZF is set if the compared values were equal, and cleared otherwise. 1654 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg 1655 InstructionMark im(this); 1656 prefix(adr, reg, true); 1657 emit_int8(0x0F); 1658 emit_int8((unsigned char)0xB0); 1659 emit_operand(reg, adr); 1660 } 1661 1662 void Assembler::comisd(XMMRegister dst, Address src) { 1663 // NOTE: dbx seems to decode this as comiss even though the 1664 // 0x66 is there. 
Strangly ucomisd comes out correct 1665 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1666 InstructionMark im(this); 1667 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);; 1668 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1669 attributes.set_rex_vex_w_reverted(); 1670 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1671 emit_int8(0x2F); 1672 emit_operand(dst, src); 1673 } 1674 1675 void Assembler::comisd(XMMRegister dst, XMMRegister src) { 1676 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1677 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1678 attributes.set_rex_vex_w_reverted(); 1679 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1680 emit_int8(0x2F); 1681 emit_int8((unsigned char)(0xC0 | encode)); 1682 } 1683 1684 void Assembler::comiss(XMMRegister dst, Address src) { 1685 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1686 InstructionMark im(this); 1687 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1688 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1689 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1690 emit_int8(0x2F); 1691 emit_operand(dst, src); 1692 } 1693 1694 void Assembler::comiss(XMMRegister dst, XMMRegister src) { 1695 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1696 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1697 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1698 emit_int8(0x2F); 1699 emit_int8((unsigned 
char)(0xC0 | encode));
}

// CPUID (0F A2).
void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}

// Opcode / Instruction                      Op /  En  64 - Bit Mode  Compat / Leg Mode Description                  Implemented
// F2 0F 38 F0 / r       CRC32 r32, r / m8  RM        Valid          Valid             Accumulate CRC32 on r / m8.  v
// F2 REX 0F 38 F0 / r   CRC32 r32, r / m8* RM        Valid          N.E.              Accumulate CRC32 on r / m8.  -
// F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8  RM        Valid          N.E.              Accumulate CRC32 on r / m8.  -
//
// F2 0F 38 F1 / r       CRC32 r32, r / m16 RM        Valid          Valid             Accumulate CRC32 on r / m16. v
//
// F2 0F 38 F1 / r       CRC32 r32, r / m32 RM        Valid          Valid             Accumulate CRC32 on r / m32. v
//
// F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM        Valid          N.E.              Accumulate CRC32 on r / m64. v
void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  int8_t w = 0x01;            // selects opcode F1 (16/32/64-bit source); cleared to F0 for bytes
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    // Note:
    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
    //
    // Page B - 72 Vol. 2C says
    // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
    // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 :
    // mod qwreg r / m
    // F0!!!
    // while 3 - 208 Vol. 2A
    // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64.
    //
    // the 0 on a last bit is reserved for a different flavor of this instruction :
    // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8.
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, v, p);)
  emit_int8((int8_t)0x0F);
  emit_int8(0x38);
  emit_int8((int8_t)(0xF0 | w));
  // Hand-built ModRM byte: mod=11, reg=crc, rm=v.
  emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
}

// Memory-source form of CRC32 (see opcode table above).
void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, adr, p);)
  emit_int8((int8_t)0x0F);
  emit_int8(0x38);
  emit_int8((int8_t)(0xF0 | w));
  emit_operand(crc, adr);
}

// CVTDQ2PD xmm, xmm (F3 0F E6 /r).
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE6);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Vector form of CVTDQ2PD.
void Assembler::vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ?
VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE6);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTDQ2PS xmm, xmm (0F 5B /r).
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Vector form of CVTDQ2PS.
void Assembler::vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len <= AVX_256bit ? VM_Version::supports_avx() : VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSD2SS xmm, xmm (F2 0F 5A /r).
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSD2SS xmm, m64.
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

// CVTSI2SD xmm, r32 (F2 0F 2A /r); GP source encoded via as_XMMRegister.
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SD xmm, m32.
void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTSI2SS xmm, r32 (F3 0F 2A /r).
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SS xmm, m32.
void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTSI2SS xmm, r64 (REX.W form).
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSS2SD xmm, xmm (F3 0F 5A /r).
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSS2SD xmm, m32.
void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /*
rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1904 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1905 emit_int8(0x2C); 1906 emit_int8((unsigned char)(0xC0 | encode)); 1907 } 1908 1909 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 1910 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1911 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1912 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1913 emit_int8(0x2C); 1914 emit_int8((unsigned char)(0xC0 | encode)); 1915 } 1916 1917 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) { 1918 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1919 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 1920 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 1921 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1922 emit_int8((unsigned char)0xE6); 1923 emit_int8((unsigned char)(0xC0 | encode)); 1924 } 1925 1926 void Assembler::vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) { 1927 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), ""); 1928 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1929 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1930 emit_int8((unsigned char)0x5A); 1931 emit_int8((unsigned char)(0xC0 | encode)); 1932 } 1933 1934 void Assembler::evcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len) { 1935 assert(UseAVX > 2, ""); 1936 InstructionAttr attributes(vector_len, /* rex_w */ false, 
/* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 1937 attributes.set_is_evex_instruction(); 1938 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1939 emit_int8((unsigned char)0x5A); 1940 emit_int8((unsigned char)(0xC0 | encode)); 1941 } 1942 1943 void Assembler::pabsb(XMMRegister dst, XMMRegister src) { 1944 assert(VM_Version::supports_ssse3(), ""); 1945 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1946 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1947 emit_int8(0x1C); 1948 emit_int8((unsigned char)(0xC0 | encode)); 1949 } 1950 1951 void Assembler::pabsw(XMMRegister dst, XMMRegister src) { 1952 assert(VM_Version::supports_ssse3(), ""); 1953 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1954 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 1955 emit_int8(0x1D); 1956 emit_int8((unsigned char)(0xC0 | encode)); 1957 } 1958 1959 void Assembler::vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) { 1960 assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), ""); 1961 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1962 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1963 emit_int8((unsigned char)0x5A); 1964 emit_int8((unsigned char)(0xC0 | encode)); 1965 } 1966 1967 void Assembler::evcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len) { 1968 assert(UseAVX > 2, ""); 1969 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 1970 
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Convert packed signed qwords to singles (requires AVX-512DQ): 0F 5B with rex_w set.
void Assembler::evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x5B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Convert packed signed qwords to doubles (requires AVX-512DQ): F3 0F E6 with rex_w set.
void Assembler::evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2 && VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE6);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed words to bytes: EVEX F3 0F 38 30.
void Assembler::evpmovwb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed dwords to words: EVEX F3 0F 38 33.
void Assembler::evpmovdw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x33);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed dwords to bytes: EVEX F3 0F 38 31.
void Assembler::evpmovdb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x31);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed qwords to dwords: EVEX F3 0F 38 35.
void Assembler::evpmovqd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x35);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed qwords to bytes: EVEX F3 0F 38 32.
void Assembler::evpmovqb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x32);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Down-convert packed qwords to words: EVEX F3 0F 38 34.
void Assembler::evpmovqw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x34);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed absolute value of signed dwords (SSSE3): 66 0F 38 1E.
void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x1E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// AVX packed absolute value of signed bytes (128/256-bit only): 66 0F 38 1C.
void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// AVX packed absolute value of signed words (128/256-bit only): 66 0F 38 1D.
void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1D);
  emit_int8((unsigned char)(0xC0 | encode));
}

// AVX packed absolute value of signed dwords (128/256-bit only): 66 0F 38 1E.
void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVEX packed absolute value of signed bytes: 66 0F 38 1C.
void Assembler::evpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVEX packed absolute value of signed words: 66 0F 38 1D.
void Assembler::evpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1D);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVEX packed absolute value of signed dwords: 66 0F 38 1E.
void Assembler::evpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVEX packed absolute value of signed qwords: 66 0F 38 1F with rex_w set.
void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x1F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Decrement a 32-bit memory operand: FF /1.
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);   // rcx supplies the /1 opcode extension
}

// Scalar double divide, memory source: F2 0F 5E.
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// Scalar double divide, register source: F2 0F 5E.
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Scalar single divide, memory source: F3 0F 5E.
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// Scalar single divide, register source: F3 0F 5E.
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Empty MMX state: 0F 77.
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

// Halt: F4.
void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

// Signed 32-bit divide by register: F7 /7.
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  // F7 /6
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

// One-operand signed multiply: F7 /5.
void Assembler::imull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE8 | encode));
}

// Two-operand signed multiply: 0F AF /r.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}


// Signed multiply by immediate: 6B /r ib for imm8, 69 /r id otherwise.
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

// Signed multiply by memory operand: 0F AF /r.
void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}


// Increment a 32-bit memory operand: FF /0.
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);   // rax supplies the /0 opcode extension
}

// Conditional jump to a label. A bound target may use the short 70+cc disp8
// form (when maybe_short allows and it fits); otherwise 0F 80+cc disp32.
// An unbound label always gets the long form and is patched on bind.
void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
2253 // Note: use jccb() if label to be bound is very close to get 2254 // an 8-bit displacement 2255 L.add_patch_at(code(), locator()); 2256 emit_int8(0x0F); 2257 emit_int8((unsigned char)(0x80 | cc)); 2258 emit_int32(0); 2259 } 2260 } 2261 2262 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) { 2263 if (L.is_bound()) { 2264 const int short_size = 2; 2265 address entry = target(L); 2266 #ifdef ASSERT 2267 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2268 intptr_t delta = short_branch_delta(); 2269 if (delta != 0) { 2270 dist += (dist < 0 ? (-delta) :delta); 2271 } 2272 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line); 2273 #endif 2274 intptr_t offs = (intptr_t)entry - (intptr_t)pc(); 2275 // 0111 tttn #8-bit disp 2276 emit_int8(0x70 | cc); 2277 emit_int8((offs - short_size) & 0xFF); 2278 } else { 2279 InstructionMark im(this); 2280 L.add_patch_at(code(), locator(), file, line); 2281 emit_int8(0x70 | cc); 2282 emit_int8(0); 2283 } 2284 } 2285 2286 void Assembler::jmp(Address adr) { 2287 InstructionMark im(this); 2288 prefix(adr); 2289 emit_int8((unsigned char)0xFF); 2290 emit_operand(rsp, adr); 2291 } 2292 2293 void Assembler::jmp(Label& L, bool maybe_short) { 2294 if (L.is_bound()) { 2295 address entry = target(L); 2296 assert(entry != NULL, "jmp most probably wrong"); 2297 InstructionMark im(this); 2298 const int short_size = 2; 2299 const int long_size = 5; 2300 intptr_t offs = entry - pc(); 2301 if (maybe_short && is8bit(offs - short_size)) { 2302 emit_int8((unsigned char)0xEB); 2303 emit_int8((offs - short_size) & 0xFF); 2304 } else { 2305 emit_int8((unsigned char)0xE9); 2306 emit_int32(offs - long_size); 2307 } 2308 } else { 2309 // By default, forward jumps are always 32-bit displacements, since 2310 // we can't yet know where the label will be bound. If you're sure that 2311 // the forward jump will not run beyond 256 bytes, use jmpb to 2312 // force an 8-bit displacement. 
2313 InstructionMark im(this); 2314 L.add_patch_at(code(), locator()); 2315 emit_int8((unsigned char)0xE9); 2316 emit_int32(0); 2317 } 2318 } 2319 2320 void Assembler::jmp(Register entry) { 2321 int encode = prefix_and_encode(entry->encoding()); 2322 emit_int8((unsigned char)0xFF); 2323 emit_int8((unsigned char)(0xE0 | encode)); 2324 } 2325 2326 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { 2327 InstructionMark im(this); 2328 emit_int8((unsigned char)0xE9); 2329 assert(dest != NULL, "must have a target"); 2330 intptr_t disp = dest - (pc() + sizeof(int32_t)); 2331 assert(is_simm32(disp), "must be 32bit offset (jmp)"); 2332 emit_data(disp, rspec.reloc(), call32_operand); 2333 } 2334 2335 void Assembler::jmpb_0(Label& L, const char* file, int line) { 2336 if (L.is_bound()) { 2337 const int short_size = 2; 2338 address entry = target(L); 2339 assert(entry != NULL, "jmp most probably wrong"); 2340 #ifdef ASSERT 2341 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2342 intptr_t delta = short_branch_delta(); 2343 if (delta != 0) { 2344 dist += (dist < 0 ? 
(-delta) :delta); 2345 } 2346 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line); 2347 #endif 2348 intptr_t offs = entry - pc(); 2349 emit_int8((unsigned char)0xEB); 2350 emit_int8((offs - short_size) & 0xFF); 2351 } else { 2352 InstructionMark im(this); 2353 L.add_patch_at(code(), locator(), file, line); 2354 emit_int8((unsigned char)0xEB); 2355 emit_int8(0); 2356 } 2357 } 2358 2359 void Assembler::ldmxcsr( Address src) { 2360 if (UseAVX > 0 ) { 2361 InstructionMark im(this); 2362 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2363 vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2364 emit_int8((unsigned char)0xAE); 2365 emit_operand(as_Register(2), src); 2366 } else { 2367 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2368 InstructionMark im(this); 2369 prefix(src); 2370 emit_int8(0x0F); 2371 emit_int8((unsigned char)0xAE); 2372 emit_operand(as_Register(2), src); 2373 } 2374 } 2375 2376 void Assembler::leal(Register dst, Address src) { 2377 InstructionMark im(this); 2378 #ifdef _LP64 2379 emit_int8(0x67); // addr32 2380 prefix(src, dst); 2381 #endif // LP64 2382 emit_int8((unsigned char)0x8D); 2383 emit_operand(dst, src); 2384 } 2385 2386 void Assembler::lfence() { 2387 emit_int8(0x0F); 2388 emit_int8((unsigned char)0xAE); 2389 emit_int8((unsigned char)0xE8); 2390 } 2391 2392 void Assembler::lock() { 2393 emit_int8((unsigned char)0xF0); 2394 } 2395 2396 void Assembler::lzcntl(Register dst, Register src) { 2397 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 2398 emit_int8((unsigned char)0xF3); 2399 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2400 emit_int8(0x0F); 2401 emit_int8((unsigned char)0xBD); 2402 emit_int8((unsigned char)(0xC0 | encode)); 2403 } 2404 2405 // Emit mfence instruction 2406 void Assembler::mfence() { 2407 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}

// Register-to-register move sized to the platform word: movq on 64-bit, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// Move aligned packed doubles: 66 0F 28.
// Widened to 512-bit encoding when AVX-512 lacks VL support.
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x28);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Move aligned packed singles: 0F 28.
// Widened to 512-bit encoding when AVX-512 lacks VL support.
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x28);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Move low two singles of src to high two of dst: 0F 16
// (src is also passed as the nds operand for the VEX/EVEX form).
void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Load byte from memory: 8A /r (byte-register prefix handling via prefix(..., true)).
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}

// Duplicate the low double across the register: F2 0F 12.
// Widened to 512-bit encoding when AVX-512 lacks VL support.
void Assembler::movddup(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_int8(0xC0 | encode);
}

// Move GP register to mask register, byte granularity: 66 0F 92 (AVX-512DQ).
void Assembler::kmovbl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Move mask register to GP register, byte granularity: 66 0F 93 (AVX-512DQ).
void Assembler::kmovbl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x93);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Move GP register to mask register, word granularity: 0F 92.
void Assembler::kmovwl(KRegister dst, Register src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::kmovwl(Register dst, KRegister src) { 2487 assert(VM_Version::supports_evex(), ""); 2488 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2489 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2490 emit_int8((unsigned char)0x93); 2491 emit_int8((unsigned char)(0xC0 | encode)); 2492 } 2493 2494 void Assembler::kmovwl(KRegister dst, Address src) { 2495 assert(VM_Version::supports_evex(), ""); 2496 InstructionMark im(this); 2497 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2498 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2499 emit_int8((unsigned char)0x90); 2500 emit_operand((Register)dst, src); 2501 } 2502 2503 void Assembler::kmovdl(KRegister dst, Register src) { 2504 assert(VM_Version::supports_avx512bw(), ""); 2505 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2506 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2507 emit_int8((unsigned char)0x92); 2508 emit_int8((unsigned char)(0xC0 | encode)); 2509 } 2510 2511 void Assembler::kmovdl(Register dst, KRegister src) { 2512 assert(VM_Version::supports_avx512bw(), ""); 2513 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2514 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2515 emit_int8((unsigned char)0x93); 2516 emit_int8((unsigned char)(0xC0 | encode)); 2517 } 2518 2519 void Assembler::kmovql(KRegister dst, KRegister src) { 2520 assert(VM_Version::supports_avx512bw(), ""); 2521 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ 
true, /* no_mask_reg */ true, /* uses_vl */ false); 2522 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2523 emit_int8((unsigned char)0x90); 2524 emit_int8((unsigned char)(0xC0 | encode)); 2525 } 2526 2527 void Assembler::kmovql(KRegister dst, Address src) { 2528 assert(VM_Version::supports_avx512bw(), ""); 2529 InstructionMark im(this); 2530 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2531 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2532 emit_int8((unsigned char)0x90); 2533 emit_operand((Register)dst, src); 2534 } 2535 2536 void Assembler::kmovql(Address dst, KRegister src) { 2537 assert(VM_Version::supports_avx512bw(), ""); 2538 InstructionMark im(this); 2539 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2540 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2541 emit_int8((unsigned char)0x90); 2542 emit_operand((Register)src, dst); 2543 } 2544 2545 void Assembler::kmovql(KRegister dst, Register src) { 2546 assert(VM_Version::supports_avx512bw(), ""); 2547 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2548 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2549 emit_int8((unsigned char)0x92); 2550 emit_int8((unsigned char)(0xC0 | encode)); 2551 } 2552 2553 void Assembler::kmovql(Register dst, KRegister src) { 2554 assert(VM_Version::supports_avx512bw(), ""); 2555 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2556 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2557 emit_int8((unsigned 
char)0x93); 2558 emit_int8((unsigned char)(0xC0 | encode)); 2559 } 2560 2561 void Assembler::knotwl(KRegister dst, KRegister src) { 2562 assert(VM_Version::supports_evex(), ""); 2563 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2564 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2565 emit_int8((unsigned char)0x44); 2566 emit_int8((unsigned char)(0xC0 | encode)); 2567 } 2568 2569 // This instruction produces ZF or CF flags 2570 void Assembler::kortestbl(KRegister src1, KRegister src2) { 2571 assert(VM_Version::supports_avx512dq(), ""); 2572 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2573 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2574 emit_int8((unsigned char)0x98); 2575 emit_int8((unsigned char)(0xC0 | encode)); 2576 } 2577 2578 // This instruction produces ZF or CF flags 2579 void Assembler::kortestwl(KRegister src1, KRegister src2) { 2580 assert(VM_Version::supports_evex(), ""); 2581 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2582 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2583 emit_int8((unsigned char)0x98); 2584 emit_int8((unsigned char)(0xC0 | encode)); 2585 } 2586 2587 // This instruction produces ZF or CF flags 2588 void Assembler::kortestdl(KRegister src1, KRegister src2) { 2589 assert(VM_Version::supports_avx512bw(), ""); 2590 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2591 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2592 emit_int8((unsigned 
char)0x98); 2593 emit_int8((unsigned char)(0xC0 | encode)); 2594 } 2595 2596 // This instruction produces ZF or CF flags 2597 void Assembler::kortestql(KRegister src1, KRegister src2) { 2598 assert(VM_Version::supports_avx512bw(), ""); 2599 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2600 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2601 emit_int8((unsigned char)0x98); 2602 emit_int8((unsigned char)(0xC0 | encode)); 2603 } 2604 2605 // This instruction produces ZF or CF flags 2606 void Assembler::ktestql(KRegister src1, KRegister src2) { 2607 assert(VM_Version::supports_avx512bw(), ""); 2608 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2609 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2610 emit_int8((unsigned char)0x99); 2611 emit_int8((unsigned char)(0xC0 | encode)); 2612 } 2613 2614 void Assembler::ktestq(KRegister src1, KRegister src2) { 2615 assert(VM_Version::supports_avx512bw(), ""); 2616 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2617 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2618 emit_int8((unsigned char)0x99); 2619 emit_int8((unsigned char)(0xC0 | encode)); 2620 } 2621 2622 void Assembler::ktestd(KRegister src1, KRegister src2) { 2623 assert(VM_Version::supports_avx512bw(), ""); 2624 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2625 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2626 emit_int8((unsigned char)0x99); 2627 emit_int8((unsigned 
char)(0xC0 | encode)); 2628 } 2629 2630 void Assembler::movb(Address dst, int imm8) { 2631 InstructionMark im(this); 2632 prefix(dst); 2633 emit_int8((unsigned char)0xC6); 2634 emit_operand(rax, dst, 1); 2635 emit_int8(imm8); 2636 } 2637 2638 2639 void Assembler::movb(Address dst, Register src) { 2640 assert(src->has_byte_register(), "must have byte register"); 2641 InstructionMark im(this); 2642 prefix(dst, src, true); 2643 emit_int8((unsigned char)0x88); 2644 emit_operand(src, dst); 2645 } 2646 2647 void Assembler::movdl(XMMRegister dst, Register src) { 2648 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2649 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2650 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2651 emit_int8(0x6E); 2652 emit_int8((unsigned char)(0xC0 | encode)); 2653 } 2654 2655 void Assembler::movdl(Register dst, XMMRegister src) { 2656 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2657 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2658 // swap src/dst to get correct prefix 2659 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2660 emit_int8(0x7E); 2661 emit_int8((unsigned char)(0xC0 | encode)); 2662 } 2663 2664 void Assembler::movdl(XMMRegister dst, Address src) { 2665 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2666 InstructionMark im(this); 2667 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2668 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2669 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2670 emit_int8(0x6E); 2671 emit_operand(dst, src); 2672 } 2673 2674 
// MOVD m32, xmm (66 0F 7E /r) — store low dword of xmm.
void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

// MOVDQA xmm, xmm (66 0F 6F /r) — aligned integer move.
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Promote to 512-bit length on AVX-512 targets without VL support.
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVDQA xmm, m128 (66 0F 6F /r) — aligned integer load.
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// MOVDQU xmm, m128 (F3 0F 6F /r) — unaligned integer load.
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// MOVDQU xmm, xmm (F3 0F 6F /r).
void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVDQU m128, xmm (F3 0F 7F /r) — unaligned integer store.
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
// VMOVDQU ymm, ymm (VEX.256 F3 0F 6F /r).
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMOVDQU ymm, m256 (VEX.256 F3 0F 6F /r).
void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// VMOVDQU m256, ymm (VEX.256 F3 0F 7F /r).
void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
// EVMOVDQU8 xmm, xmm — byte-granular masked move; falls back to the
// F2 (dqu8) vs F3 (dqu32/64) prefix depending on BW legacy mode.
void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  if (merge) {
    // Merge-masking: preserve unselected destination elements.
    attributes.reset_is_clear_context();
  }
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVMOVDQU8 xmm, mem — byte-granular load.
void Assembler::evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU8 mem, xmm — byte-granular store.
void Assembler::evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// EVMOVDQU8 xmm{k}, mem — masked byte-granular load with an explicit opmask.
void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU16 xmm, mem — word-granular load (vex_w = 1 selects word size).
void Assembler::evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU16 xmm{k}, mem — masked word-granular load.
void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU16 mem, xmm — word-granular store.
void Assembler::evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// EVMOVDQU16 mem{k}, xmm — masked word-granular store.
// NOTE(review): legacy_mode is hard-coded false here while the sibling
// unmasked form uses _legacy_mode_bw — looks intentional (avx512vlbw is
// asserted), but worth confirming.
void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// EVMOVDQU32 xmm, xmm — convenience wrapper using the k1 mask.
void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdqul(dst, k1, src, /*merge*/ false, vector_len);
}

// EVMOVDQU32 xmm{k}, xmm (EVEX F3 0F 6F /r, W0).
void Assembler::evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVMOVDQU32 xmm, mem — convenience wrapper using the k1 mask.
void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdqul(dst, k1, src, /*merge*/ false, vector_len);
}

// EVMOVDQU32 xmm{k}, mem — masked dword-granular load.
void Assembler::evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU32 mem, xmm — convenience wrapper; stores use merge semantics.
void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdqul(dst, k1, src, /*merge*/ true, vector_len);
}

// EVMOVDQU32 mem{k}, xmm — masked dword-granular store.
void Assembler::evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// EVMOVDQU64 xmm, xmm — convenience wrapper using the k1 mask.
void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdquq(dst, k1, src, /*merge*/ false, vector_len);
}

// EVMOVDQU64 xmm{k}, xmm (EVEX F3 0F 6F /r, W1).
void Assembler::evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVMOVDQU64 xmm, mem — convenience wrapper using the k1 mask.
void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdquq(dst, k1, src, /*merge*/ false, vector_len);
}

// EVMOVDQU64 xmm{k}, mem — masked qword-granular load.
void Assembler::evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// EVMOVDQU64 mem, xmm — convenience wrapper; stores use merge semantics.
void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  // Users of this routine assume k1 usage.
  evmovdquq(dst, k1, src, /*merge*/ true, vector_len);
}

// EVMOVDQU64 mem{k}, xmm — masked qword-granular store.
void Assembler::evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

// MOV r32, imm32 (B8+rd id).
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

// MOV r32, r32 (8B /r).
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV r32, m32 (8B /r).
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

// MOV m32, imm32 (C7 /0 id) — rax selects the /0 extension.
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

// MOV m32, r32 (89 /r).
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
// MOVLPD xmm, m64 (66 0F 12 /r) — load 64 bits into the low half of dst;
// dst is also the nds operand so the high half is preserved.
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src);
}

// MOVQ mm, m64 (0F 6F /r) — legacy MMX load.
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// MOVQ m64, mm (0F 7F /r) — legacy MMX store.
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

// MOVQ xmm, m64 (F3 0F 7E /r) — scalar quadword load, upper bits zeroed.
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

// MOVQ m64, xmm (66 0F D6 /r) — scalar quadword store.
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

// MOVQ r64, xmm (66 REX.W 0F 7E /r).
void Assembler::movq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVQ xmm, r64 (66 REX.W 0F 6E /r).
void Assembler::movq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSX r32, m8 (0F BE /r) — sign-extending byte load.
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

// MOVSX r32, r8 (0F BE /r) — sign-extending byte register move.
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSD xmm, xmm (F2 0F 10 /r) — dst doubles as the nds operand so the
// upper half is preserved.
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSD xmm, m64 (F2 0F 10 /r) — scalar double load.
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}

// MOVSD m64, xmm (F2 0F 11 /r) — scalar double store.
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}

// MOVSS xmm, xmm (F3 0F 10 /r) — dst doubles as the nds operand.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSS xmm, m32 (F3 0F 10 /r) — scalar float load.
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}

// MOVSS m32, xmm (F3 0F 11 /r) — scalar float store.
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}
// MOVSX r32, m16 (0F BF): sign-extend a word from memory into a 32-bit register.
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

// MOVSX r32, r16 (0F BF): sign-extend a 16-bit register into a 32-bit register.
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV m16, imm16 (66 C7 /0): store a 16-bit immediate to memory.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  // rax supplies the ModRM reg field (opcode extension /0), not a register operand;
  // trailing 2 accounts for the 2-byte immediate that follows the operand bytes.
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

// MOV r16, m16 (66 8B): load a word from memory into a register.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

// MOV m16, r16 (66 89): store a 16-bit register to memory.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// MOVZX r32, m8 (0F B6): zero-extend a byte from memory into a 32-bit register.
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

// MOVZX r32, r8 (0F B6): zero-extend a byte register into a 32-bit register.
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  // NOTE(review): sibling emitters cast (unsigned char) here; value is identical either way
  emit_int8(0xC0 | encode);
}

// MOVZX r32, m16 (0F B7): zero-extend a word from memory into a 32-bit register.
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

// MOVZX r32, r16 (0F B7): zero-extend a 16-bit register into a 32-bit register.
void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
}

// MUL m32 (F7 /4): unsigned multiply EAX by a memory operand.
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  // rsp supplies the ModRM reg field (opcode extension /4), not a register operand
  emit_operand(rsp, src);
}

// MUL r32 (F7 /4): unsigned multiply EAX by a register.
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode)); // 0xE0 = ModRM mod=11, reg=/4
}

// MULSD xmm, m64 (F2 0F 59): scalar double-precision multiply from memory.
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// MULSD xmm, xmm (F2 0F 59): scalar double-precision multiply.
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MULSS xmm, m32 (F3 0F 59): scalar single-precision multiply from memory.
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// MULSS xmm, xmm (F3 0F 59): scalar single-precision multiply.
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// NEG r32 (F7 /3): two's-complement negate.
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode)); // 0xD8 = ModRM mod=11, reg=/3
}

// Emit exactly i bytes of nop padding, preferring the CPU vendor's recommended
// multi-byte nop encodings (0F 1F [address]) when UseAddressNop is enabled.
// The case cascades below fall through deliberately: each larger case adds
// 0x66 size prefixes in front of a smaller encoding.
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) { // deliberate fall-through between cases
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) { // deliberate fall-through; i is decremented as prefixes are emitted
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) { // deliberate fall-through between cases
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  if (UseAddressNop && VM_Version::is_zx()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for ZX
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is ZX specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while (i >= 15) {
      // For ZX don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) { // deliberate fall-through between cases
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Fallback when address nops are disabled or the vendor is unknown:
  // using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) { // deliberate fall-through between cases
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}

// NOT r32 (F7 /2): one's-complement negate.
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode)); // 0xD0 = ModRM mod=11, reg=/2
}

// OR of two registers via emit_arith (opcode 0x0B = OR r, r/m).
// NOTE(review): unlike movw above, no 0x66 operand-size prefix is emitted here,
// so this encodes the same bytes as the 32-bit orl(Register, Register) --
// confirm whether a 16-bit OR was intended.
void Assembler::orw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

// OR m32, imm32 (81 /1): rcx supplies the /1 opcode extension.
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

// OR r32, imm32 (81 /1).
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

// OR r32, m32 (0B).
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src,
         dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

// OR r32, r32 (0B via emit_arith).
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

// OR m32, r32 (09).
void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}

// OR m8, imm8 (80 /1): rcx supplies the /1 opcode extension;
// trailing 1 accounts for the 1-byte immediate that follows.
void Assembler::orb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rcx, dst, 1);
  emit_int8(imm8);
}

// PACKUSWB xmm, m128 (66 0F 67): pack words to unsigned bytes with saturation.
void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_operand(dst, src);
}

// PACKUSWB xmm, xmm (66 0F 67).
void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPACKUSWB: AVX form of packuswb with a non-destructive source (nds).
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPACKUSDW (66 0F38 2B): pack dwords to unsigned words with saturation.
void Assembler::vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x2B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPERMQ: permute quadwords by immediate control.
void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  // VEX.256.66.0F3A.W1 00 /r ib
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x00);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

// VPERMD: permute dwords using a vector of indices in nds.
void Assembler::vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  // VEX.NDS.256.66.0F38.W0 36 /r
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_int8(0xC0 | encode);
}

// VPERMD, memory-source form.
void Assembler::vpermd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx2(), "");
  // VEX.NDS.256.66.0F38.W0 36 /r
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /*
                             legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x36);
  emit_operand(dst, src);
}

// VPERM2I128 (66 0F3A 46 ib): select/combine 128-bit lanes of two sources.
void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x46);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

// VPERM2F128 (66 0F3A 06 ib): floating-point counterpart of vperm2i128.
void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x06);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

// VPERMILPS (66 0F3A 04 ib): in-lane permute of single-precision elements.
void Assembler::vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x04);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

// VPERMPD (66 0F3A.W1 01 ib): permute double-precision elements by immediate.
void Assembler::vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x01);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

// VPERMI2Q (EVEX 66 0F38.W1 76): full two-source quadword permute (AVX-512).
void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PAUSE (F3 90): spin-loop hint.
void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}

// UD2 (0F 0B): guaranteed-undefined instruction, raises #UD.
void Assembler::ud2() {
  emit_int8(0x0F);
  emit_int8(0x0B);
}

// PCMPESTRI xmm, m128, imm8 (66 0F3A 61 ib): packed string compare, explicit lengths.
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

// PCMPESTRI xmm, xmm, imm8 (66 0F3A 61 ib).
void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void
     Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74); // PCMPEQB opcode
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written the mask used to process the equal components
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}

// EVEX PCMPGTB k, xmm, m: signed byte greater-than compare into mask register kdst.
void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64); // PCMPGTB opcode
  // the k-register number is passed through a GPR wrapper for emit_operand's reg field
  emit_operand(as_Register(dst_enc), src);
}

// EVEX PCMPGTB with an embedded opmask (merge-masked compare into kdst).
void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(is_vector_masking(), "");
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src);
}

// EVEX PCMPUW k, xmm, xmm, pred (66 0F3A 3E ib): unsigned word compare, predicate in vcc.
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(vcc); // comparison predicate immediate
}

// EVEX PCMPUW with an embedded opmask.
void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(is_vector_masking(), "");
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(vcc);
}

// EVEX PCMPUW, memory-source form.
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  // NOTE(review): siblings pass dst_enc here; kdst->encoding() is the same value
  vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_operand(as_Register(dst_enc), src);
  emit_int8(vcc);
}

// EVEX PCMPEQB k, xmm, m: byte equality compare into mask register kdst.
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
emit_operand(as_Register(dst_enc), src); 3944 } 3945 3946 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 3947 assert(VM_Version::supports_avx512vlbw(), ""); 3948 assert(is_vector_masking(), ""); // For stub code use only 3949 InstructionMark im(this); 3950 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ false); 3951 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3952 attributes.reset_is_clear_context(); 3953 attributes.set_embedded_opmask_register_specifier(mask); 3954 attributes.set_is_evex_instruction(); 3955 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3956 emit_int8(0x74); 3957 emit_operand(as_Register(kdst->encoding()), src); 3958 } 3959 3960 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3961 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3962 assert(VM_Version::supports_sse2(), ""); 3963 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3964 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3965 emit_int8(0x75); 3966 emit_int8((unsigned char)(0xC0 | encode)); 3967 } 3968 3969 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3970 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3971 assert(vector_len == AVX_128bit ? 
// (continuation of the vpcmpeqw() assert started above)
         VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written the mask used to process the equal components
// VPCMPEQW k, vec, vec -- EVEX.66.0F 75 /r (AVX-512BW).
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPCMPEQW k, vec, mem -- memory form; FVM tuple for disp8 compression.
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQD xmm, xmm -- 66 0F 76 /r (SSE2 dword-equality compare).
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQD (VEX only; EVEX forms below write a k-register destination).
void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written the mask used to process the equal components
// Masked VPCMPEQD k{mask}, vec, vec -- EVEX.66.0F 76 /r.
void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Masked VPCMPEQD, memory source; FV tuple with 32-bit element size for disp8*N.
void Assembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  // (body of the masked evpcmpeqd(Address) overload declared just above)
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQQ xmm, xmm -- 66 0F 38 29 /r (SSE4.1 qword-equality compare).
void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQQ (VEX form).
void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written the mask used to process the equal components
// VPCMPEQQ k, vec, vec -- EVEX.66.0F38.W1 29 /r.
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written the mask used to process the equal components
// Memory form; FV tuple with 64-bit elements for disp8*N compression.
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src);
}

// PCMPGTQ xmm, xmm -- 66 0F 38 37 /r (signed qword greater-than).
// NOTE(review): PCMPGTQ is an SSE4.2 instruction; this only asserts SSE4.1 --
// confirm callers additionally guard on SSE4.2 support.
void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x37);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PMOVMSKB r32, xmm -- 66 0F D7 /r; gathers the byte sign bits into a GPR.
void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode =
    // (continuation of pmovmskb(): GPR dst is re-typed as an XMM encoding slot)
    simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVMSKB r32, ymm -- VEX.256.66.0F D7 /r (AVX2).
void Assembler::vpmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PEXTRD r32, xmm, imm8 -- 66 0F 3A 16 /r ib; note src/dst swap: the XMM is
// the ModRM reg field and the GPR is the r/m field.
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);  // element selector
}

// PEXTRD m32, xmm, imm8 -- memory destination form (T1S tuple, 32-bit element).
void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}

// PEXTRQ r64, xmm, imm8 -- REX.W/EVEX.W1 variant of 0F 3A 16.
void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode =
    simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PEXTRQ m64, xmm, imm8 -- memory destination form.
void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}

// PEXTRW r32, xmm, imm8 -- legacy 66 0F C5 /r ib form (SSE2).
void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC5);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PEXTRW m16, xmm, imm8 -- the 66 0F 3A 15 form; memory dest requires SSE4.1.
void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x15);
  emit_operand(src, dst);
  emit_int8(imm8);
}

// PEXTRB r32, xmm, imm8 -- 66 0F 3A 14 /r ib.
void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode
 */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);  // byte-lane selector
}

// PEXTRB m8, xmm, imm8 -- memory destination form (T1S tuple, 8-bit element).
void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_operand(src, dst);
  emit_int8(imm8);
}

// PINSRD xmm, r32, imm8 -- 66 0F 3A 22 /r ib.
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PINSRD xmm, m32, imm8 -- memory source form.
void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst,src);
  emit_int8(imm8);
}

// VPINSRD xmm1, xmm2, r32, imm8 -- VEX.128.66.0F3A 22 /r ib.
void Assembler::vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// PINSRQ xmm, r64, imm8 -- REX.W form of 0F 3A 22.
void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PINSRQ xmm, m64, imm8 -- memory source form.
void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}

// VPINSRQ xmm1, xmm2, r64, imm8 -- VEX.W1 form.
void Assembler::vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// PINSRW xmm, r32, imm8 -- 66 0F C4 /r ib (SSE2).
void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PINSRW xmm, m16, imm8 -- memory source form.
void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_operand(dst, src);
  emit_int8(imm8);
}

// VPINSRW xmm1, xmm2, r32, imm8 -- VEX.128.66.0F C4 /r ib.
void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// PINSRB xmm, m8, imm8 -- 66 0F 3A 20 /r ib, memory source form.
void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x20); 4279 emit_operand(dst, src); 4280 emit_int8(imm8); 4281 } 4282 4283 void Assembler::pinsrb(XMMRegister dst, Register src, int imm8) { 4284 assert(VM_Version::supports_sse4_1(), ""); 4285 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false); 4286 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4287 emit_int8(0x20); 4288 emit_int8((unsigned char)(0xC0 | encode)); 4289 emit_int8(imm8); 4290 } 4291 4292 void Assembler::vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8) { 4293 assert(VM_Version::supports_avx(), ""); 4294 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4295 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4296 emit_int8((unsigned char)0x20); 4297 emit_int8((unsigned char)(0xC0 | encode)); 4298 emit_int8((unsigned char)imm8); 4299 } 4300 4301 void Assembler::insertps(XMMRegister dst, XMMRegister src, int imm8) { 4302 assert(VM_Version::supports_sse4_1(), ""); 4303 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4304 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4305 emit_int8(0x21); 4306 emit_int8((unsigned char)(0xC0 | encode)); 4307 emit_int8(imm8); 4308 } 4309 4310 void Assembler::vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 4311 assert(VM_Version::supports_avx(), ""); 4312 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4313 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, 
                                     // (tail of the vinsertps() call begun above)
                                     &attributes);
  emit_int8(0x21);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PMOVZXBW xmm, m64 -- 66 0F 38 30 /r; half-vector (HVM) memory tuple.
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

// PMOVZXBW xmm, xmm -- register form.
void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PMOVZXDQ xmm, xmm -- 66 0F 38 35 /r.
void Assembler::pmovzxdq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x35);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PMOVSXBW xmm, xmm -- 66 0F 38 20 /r (sign-extend variant).
void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x20);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PMOVSXBD xmm, xmm -- 66 0F 38 21 /r.
void
Assembler::pmovsxbd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x21);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PMOVSXBQ xmm, xmm -- 66 0F 38 22 /r.
void Assembler::pmovsxbq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVZXBW vec, mem -- VEX/EVEX 66 0F 38 30, half-vector memory operand.
void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

// VPMOVZXBW vec, vec -- 512-bit form requires AVX-512BW.
void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
  vector_len == AVX_256bit? VM_Version::supports_avx2() :
  vector_len == AVX_512bit?
  VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_int8((unsigned char) (0xC0 | encode));
}

// Masked VPMOVZXBW vec{mask}, mem -- stub-code helper (opmask merge form).
void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(is_vector_masking(), "");
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}
// VPMOVZXDQ vec, vec -- 66 0F 38 35.
void Assembler::vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x35);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVZXBD vec, vec -- 66 0F 38 31.
void Assembler::vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ?
         // (continuation of the vpmovzxbd() assert started above)
         VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  // NOTE(review): the vpmovzx/vpmovsx register forms here funnel through
  // simd_prefix_and_encode() while vpmovzxbw/vpmovzxwd use
  // vex_prefix_and_encode() -- confirm this is intentional and both paths
  // produce the intended VEX/EVEX prefix for vector_len > 128.
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x31);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVZXBQ vec, vec -- 66 0F 38 32.
void Assembler::vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len > AVX_128bit ? VM_Version::supports_avx2() : VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x32);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXBD vec, vec -- 66 0F 38 21 (sign-extend).
void Assembler::vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x21);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXBQ vec, vec -- 66 0F 38 22.
void Assembler::vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ?
         VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXBW vec, vec -- 66 0F 38 20.
void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x20);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXWD vec, vec -- 66 0F 38 23.
void Assembler::vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x23);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXWQ vec, vec -- 66 0F 38 24.
void Assembler::vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ?
         VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x24);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVSXDQ vec, vec -- 66 0F 38 25.
void Assembler::vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x25);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPMOVWB mem, vec -- EVEX.F3.0F38 30 /r (down-convert words to bytes);
// note the operand roles: src register writes into memory dst.
void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst);
}

// Masked VPMOVWB mem{mask}, vec -- stub-code helper with embedded opmask.
void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(is_vector_masking(), "");
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
4505 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 4506 attributes.reset_is_clear_context(); 4507 attributes.set_embedded_opmask_register_specifier(mask); 4508 attributes.set_is_evex_instruction(); 4509 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 4510 emit_int8(0x30); 4511 emit_operand(src, dst); 4512 } 4513 4514 void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) { 4515 assert(VM_Version::supports_evex(), ""); 4516 assert(src != xnoreg, "sanity"); 4517 InstructionMark im(this); 4518 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4519 attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit); 4520 attributes.set_is_evex_instruction(); 4521 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 4522 emit_int8(0x31); 4523 emit_operand(src, dst); 4524 } 4525 4526 void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) { 4527 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4528 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4529 vector_len == AVX_512bit? 
VM_Version::supports_evex() : 0, " ");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x33);
  emit_int8((unsigned char)(0xC0 | encode));
}

// generic
// POP r — single-byte opcode 0x58 + register encoding.
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

// POPCNT r32, m32 (F3 0F B8 /r) — population count, memory source.
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);  // mandatory F3 prefix, emitted before any REX from prefix()
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

// POPCNT r32, r32 (F3 0F B8 /r).
void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPOPCNTD — per-dword population count (AVX512_VPOPCNTDQ, EVEX 66 0F 38 55 /r).
void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_vpopcntdq(), "must support vpopcntdq feature");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x55);
  emit_int8((unsigned char)(0xC0 | encode));
}

// POPF(Q) — pop flags from the stack (0x9D).
void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
// POP m32 (8F /0).
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);  // rax supplies the /0 opcode extension
}
#endif

// Shared helper: emits address-size/REX prefixes plus the 0F escape byte
// for the prefetch family below.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

// PREFETCHNTA (0F 18 /0) — non-temporal prefetch hint.
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

// PREFETCH (3DNow!, 0F 0D /0).
void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

// PREFETCHT0 (0F 18 /1).
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

// PREFETCHT1 (0F 18 /2).
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

// PREFETCHT2 (0F 18 /3).
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

// PREFETCHW (3DNow!, 0F 0D /1) — prefetch with intent to write.
void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

// Emit a single raw prefix byte.
void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

// PSHUFB xmm, xmm (SSSE3, 66 0F 38 00 /r) — byte-granular shuffle.
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl
*/ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPSHUFB — AVX byte shuffle (VEX 66 0F 38 00 /r); 128- and 256-bit forms only.
void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PSHUFB xmm, m128 (SSSE3, 66 0F 38 00 /r).
void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_operand(dst, src);
}

// PSHUFD xmm, xmm, imm8 (66 0F 70 /r ib) — dword shuffle by immediate.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Without AVX512VL, EVEX encodings only exist at 512-bit vector length.
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);  // trailing imm8 shuffle control
}

// VPSHUFD — AVX dword shuffle; 128- and 256-bit forms only.
void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}

// PSHUFD xmm, m128, imm8.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

// PSHUFLW xmm, xmm, imm8 (F2 0F 70 /r ib) — shuffle the four low words.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */
false); 4709 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4710 emit_int8(0x70); 4711 emit_int8((unsigned char)(0xC0 | encode)); 4712 emit_int8(mode & 0xFF); 4713 } 4714 4715 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { 4716 assert(isByte(mode), "invalid value"); 4717 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4718 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4719 InstructionMark im(this); 4720 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4721 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4722 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4723 emit_int8(0x70); 4724 emit_operand(dst, src); 4725 emit_int8(mode & 0xFF); 4726 } 4727 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 4728 assert(VM_Version::supports_evex(), "requires EVEX support"); 4729 assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); 4730 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4731 attributes.set_is_evex_instruction(); 4732 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4733 emit_int8(0x43); 4734 emit_int8((unsigned char)(0xC0 | encode)); 4735 emit_int8(imm8 & 0xFF); 4736 } 4737 void Assembler::vpshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 4738 assert(vector_len == Assembler::AVX_128bit || Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); 4739 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4740 int encode = 
vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4741 emit_int8(0xC6); 4742 emit_int8((unsigned char)(0xC0 | encode)); 4743 emit_int8(imm8 & 0xFF); 4744 } 4745 4746 void Assembler::vpshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 4747 assert(vector_len == Assembler::AVX_128bit || Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); 4748 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4749 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4750 emit_int8(0xC6); 4751 emit_int8((unsigned char)(0xC0 | encode)); 4752 emit_int8(imm8 & 0xFF); 4753 } 4754 void Assembler::psrldq(XMMRegister dst, int shift) { 4755 // Shift left 128 bit value in dst XMMRegister by shift number of bytes. 4756 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4757 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 4758 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4759 emit_int8(0x73); 4760 emit_int8((unsigned char)(0xC0 | encode)); 4761 emit_int8(shift); 4762 } 4763 4764 void Assembler::pslldq(XMMRegister dst, int shift) { 4765 // Shift left 128 bit value in dst XMMRegister by shift number of bytes. 
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}

// PTEST xmm, m128 (SSE4.1, 66 0F 38 17 /r) — sets ZF/CF from the operands.
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// PTEST xmm, xmm (SSE4.1).
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPTEST ymm, m256 (VEX 66 0F 38 17 /r), 256-bit form.
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// VPTEST ymm, ymm — fixed 256-bit form.
void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPTEST with caller-chosen vector length.
void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PUNPCKLBW xmm, m128 (66 0F 60 /r) — interleave low bytes.
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}

// PUNPCKLBW xmm, xmm.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PUNPCKLDQ xmm, m128 (66 0F 62 /r) — interleave low dwords.
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}

// PUNPCKLDQ xmm, xmm.
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PUNPCKLQDQ xmm, xmm (66 0F 6C /r) — interleave low qwords.
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PUSH imm32 (0x68).
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

// PUSH r (0x50 + register encoding).
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}

// PUSHF(Q) — push flags (0x9C).
void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
// PUSH m32 (FF /6).
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
emit_int8((unsigned char)0xFF); 4890 emit_operand(rsi, src); 4891 } 4892 #endif 4893 4894 void Assembler::rcll(Register dst, int imm8) { 4895 assert(isShiftCount(imm8), "illegal shift count"); 4896 int encode = prefix_and_encode(dst->encoding()); 4897 if (imm8 == 1) { 4898 emit_int8((unsigned char)0xD1); 4899 emit_int8((unsigned char)(0xD0 | encode)); 4900 } else { 4901 emit_int8((unsigned char)0xC1); 4902 emit_int8((unsigned char)0xD0 | encode); 4903 emit_int8(imm8); 4904 } 4905 } 4906 4907 void Assembler::rcpps(XMMRegister dst, XMMRegister src) { 4908 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4909 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 4910 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4911 emit_int8(0x53); 4912 emit_int8((unsigned char)(0xC0 | encode)); 4913 } 4914 4915 void Assembler::rcpss(XMMRegister dst, XMMRegister src) { 4916 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4917 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 4918 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4919 emit_int8(0x53); 4920 emit_int8((unsigned char)(0xC0 | encode)); 4921 } 4922 4923 void Assembler::rdtsc() { 4924 emit_int8((unsigned char)0x0F); 4925 emit_int8((unsigned char)0x31); 4926 } 4927 4928 // copies data from [esi] to [edi] using rcx pointer sized words 4929 // generic 4930 void Assembler::rep_mov() { 4931 emit_int8((unsigned char)0xF3); 4932 // MOVSQ 4933 LP64_ONLY(prefix(REX_W)); 4934 emit_int8((unsigned char)0xA5); 4935 } 4936 4937 // sets rcx bytes with rax, value at [edi] 4938 void Assembler::rep_stosb() { 4939 emit_int8((unsigned char)0xF3); // REP 4940 LP64_ONLY(prefix(REX_W)); 4941 emit_int8((unsigned char)0xAA); // STOSB 4942 } 4943 4944 // sets rcx pointer sized words with rax, value at 
// [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif

// RET — near return: C3 for the plain form, C2 imm16 to also pop imm16 bytes.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

// SAHF (0x9E) — store AH into flags; invalid in 64-bit mode.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

// SAR r32, imm8 — arithmetic right shift; short D1 /7 form when imm8 == 1.
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// SAR r32, CL (D3 /7).
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

// SBB m32, imm32 — rbx supplies the /3 opcode extension.
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r32, imm32.
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


// SBB r32, m32 (1B /r).
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// SBB r32, r32.
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

// SETcc r8 (0F 90+cc /r) — set byte register from a condition code.
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  // NOTE(review): the cast binds only to 0x90 here ('(unsigned char)0x90 | cc'),
  // unlike the parenthesized pattern used elsewhere; the emitted byte is the
  // same since cc < 16.
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PALIGNR xmm, xmm, imm8 (SSSE3, 66 0F 3A 0F /r ib) — byte-wise concat-and-extract.
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// VPALIGNR — AVX form; 128- and 256-bit only.
void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit?
VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// VALIGNQ — qword-granular align/extract (EVEX 66 0F 3A 03 /r ib), 512-bit form.
void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PBLENDW xmm, xmm, imm8 (SSE4.1, 66 0F 3A 0E /r ib) — word blend by immediate mask.
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// SHA1RNDS4 xmm, xmm, imm8 (NP 0F 3A CC /r ib) — four SHA-1 rounds.
void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// SHA1NEXTE xmm, xmm (NP 0F 38 C8 /r).
void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xC8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA1MSG1 xmm, xmm (NP 0F 38 C9 /r).
void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xC9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA1MSG2 xmm, xmm (NP 0F 38 CA /r).
void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCA);
  emit_int8((unsigned char)(0xC0 | encode));
}

// xmm0 is implicit additional source to this instruction.
// SHA256RNDS2 xmm, xmm (NP 0F 38 CB /r).
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCB);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA256MSG1 xmm, xmm (NP 0F 38 CC /r).
void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA256MSG2 xmm, xmm (NP 0F 38 CD /r).
void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCD);
  emit_int8((unsigned char)(0xC0 | encode));
}


// SHL r32, imm8 — short D1 /4 form when imm8 == 1, else C1 /4 ib.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode =
prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

// SHL r32, CL (D3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

// SHR r32, imm8 (C1 /5 ib).
// NOTE(review): unlike sarl/shll above, no short D1 form is used for imm8 == 1.
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

// SHR r32, CL (D3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

// SQRTSD xmm, xmm (F2 0F 51 /r) — scalar double square root.
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SQRTSD xmm, m64.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

// SQRTSS xmm, xmm (F3 0F 51 /r) — scalar single square root.
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// STD (0xFD) — set the direction flag.
void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

// SQRTSS xmm, m32.
void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

// STMXCSR m32 (0F AE /3) — store MXCSR; VEX-encoded when AVX is in use.
void Assembler::stmxcsr( Address dst) {
  if (UseAVX > 0 ) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
    vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst);  // register 3 encodes the /3 extension
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(dst);
    emit_int8(0x0F);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst);
  }
}

// SUB m32, imm32 — rbp supplies the /5 opcode extension.
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
emit_arith_operand(0x81, rbp, dst, imm32);
}

// SUB m32, r32 (29 /r).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

// SUB r32, imm32.
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

// SUB r32, m32 (2B /r).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

// SUB r32, r32.
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// SUBSD xmm, xmm (F2 0F 5C /r) — scalar double subtract.
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SUBSD xmm, m64.
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// SUBSS xmm, xmm (F3 0F 5C /r) — scalar single subtract.
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SUBSS xmm, m32.
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// TEST r8, imm8 (F6 /0 ib).
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// TEST m8, imm8 (F6 /0 ib).
void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1);  // rax = /0; trailing arg presumably accounts for the imm8 that follows
  emit_int8(imm8);
}

// TEST r32, imm32 — uses the short A9 form when dst encodes as 0 (rax/eax).
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

// TEST r32, r32 (85 /r).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
5333 } 5334 5335 void Assembler::testl(Register dst, Address src) { 5336 InstructionMark im(this); 5337 prefix(src, dst); 5338 emit_int8((unsigned char)0x85); 5339 emit_operand(dst, src); 5340 } 5341 5342 void Assembler::tzcntl(Register dst, Register src) { 5343 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 5344 emit_int8((unsigned char)0xF3); 5345 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 5346 emit_int8(0x0F); 5347 emit_int8((unsigned char)0xBC); 5348 emit_int8((unsigned char)0xC0 | encode); 5349 } 5350 5351 void Assembler::tzcntq(Register dst, Register src) { 5352 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 5353 emit_int8((unsigned char)0xF3); 5354 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 5355 emit_int8(0x0F); 5356 emit_int8((unsigned char)0xBC); 5357 emit_int8((unsigned char)(0xC0 | encode)); 5358 } 5359 5360 void Assembler::ucomisd(XMMRegister dst, Address src) { 5361 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5362 InstructionMark im(this); 5363 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5364 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 5365 attributes.set_rex_vex_w_reverted(); 5366 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5367 emit_int8(0x2E); 5368 emit_operand(dst, src); 5369 } 5370 5371 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 5372 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5373 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 5374 attributes.set_rex_vex_w_reverted(); 5375 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5376 emit_int8(0x2E); 5377 emit_int8((unsigned char)(0xC0 | 
// Continuation of ucomisd(XMMRegister, XMMRegister) from the previous line.
encode));
}

// Unordered compare scalar float vs memory: UCOMISS xmm, m32 (0F 2E).
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

// Unordered compare scalar float: UCOMISS xmm, xmm (0F 2E).
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// RTM transaction abort: XABORT imm8 (C6 F8 ib).
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}

// Exchange-and-add, byte: XADD [mem8], r8 (0F C0).
void Assembler::xaddb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC0);
  emit_operand(src, dst);
}

// Exchange-and-add, word: 0x66 operand-size prefix + XADD (0F C1).
void Assembler::xaddw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

// Exchange-and-add, dword: XADD [mem32], r32 (0F C1).
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

// RTM transaction begin: XBEGIN rel32 (C7 F8 cd).  When the abort label is
// bound we emit the resolved relative offset; otherwise a 0 placeholder is
// emitted and the label records a patch site.
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}

// XCHG r8, [mem8] (opcode 0x86). // xchg
void Assembler::xchgb(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x86);
  emit_operand(dst, src);
}

// XCHG r16, [mem16]: 0x66 prefix + opcode 0x87. // xchg
void Assembler::xchgw(Register dst, Address src) { // xchg
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHG r32, [mem32] (opcode 0x87). // xchg
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHG r32, r32 (opcode 0x87, register-direct form).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

// RTM transaction end: XEND (0F 01 D5).
void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

// Read XCR extended control register: XGETBV (0F 01 D0).
void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}

// XOR r32, imm32 (0x81 /6).
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r32, [mem32] (opcode 0x33 /r).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

// XOR r32, r32 (opcode 0x33 /r).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// Start of xorb(Register, Address); the definition continues on the next line.
void
// XOR r8, [mem8] (opcode 0x32 /r); 'void' for this signature is on the
// preceding source line.
Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src);
}

// NOTE(review): emits the same 0x33 encoding as xorl with no 0x66
// operand-size prefix, so this is a 32-bit XOR — confirm callers only
// depend on the low 16 bits of the result.
void Assembler::xorw(Register dst, Register src) {
  (void)prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// AVX 3-operands scalar float-point arithmetic instructions

// VADDSD xmm, xmm, m64 (VEX.F2 0F 58).
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSD xmm, xmm, xmm (VEX.F2 0F 58, register form).
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDSS xmm, xmm, m32 (VEX.F3 0F 58).
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSS xmm, xmm, xmm (VEX.F3 0F 58, register form).
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVSD xmm, xmm, m64 (VEX.F2 0F 5E).
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSD xmm, xmm, xmm (VEX.F2 0F 5E, register form).
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVSS xmm, xmm, m32 (VEX.F3 0F 5E); continues on the next source line.
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /*
// Continuation of vdivss(XMMRegister, XMMRegister, Address) from the
// previous line.
no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSS xmm, xmm, xmm (VEX.F3 0F 5E, register form).
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Fused multiply-add scalar double: VFMADD231SD (VEX.66 0F38 B9).
void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Fused multiply-add scalar float: VFMADD231SS (VEX.66 0F38 B9, W=0).
void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULSD xmm, xmm, m64 (VEX.F2 0F 59).
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULSD xmm, xmm, xmm (VEX.F2 0F 59, register form).
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULSS xmm, xmm, m32 (VEX.F3 0F 59).
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULSS xmm, xmm, xmm (VEX.F3 0F 59, register form).
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Start of vsubsd(XMMRegister, XMMRegister, Address); continues on the
// next source line.
void
// VSUBSD xmm, xmm, m64 (VEX.F2 0F 5C); 'void' for this signature is on
// the preceding source line.
Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBSD xmm, xmm, xmm (VEX.F2 0F 5C, register form).
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBSS xmm, xmm, m32 (VEX.F3 0F 5C).
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBSS xmm, xmm, xmm (VEX.F3 0F 5C, register form).
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

//====================VECTOR ARITHMETIC=====================================

// Float-point vector arithmetic

// Packed double add: ADDPD xmm, xmm (66 0F 58).
void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed double add from memory: ADDPD xmm, m128 (66 0F 58).
void Assembler::addpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // EVEX disp8 compression: full-vector tuple with 64-bit elements.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}


// Packed float add: ADDPS xmm, xmm (0F 58).
// NOTE(review): asserts sse2 although addps is an SSE1 instruction —
// this matches the other packed-op emitters here; confirm intentional.
void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDPD with explicit vector length; continues on the next source line.
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
// Continuation of vaddpd(XMM, XMM, XMM, int) from the previous line.
InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDPS with explicit vector length (VEX 0F 58, register form).
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDPD with a memory source (VEX.66 0F 58).
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDPS with a memory source (VEX 0F 58).
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// Packed double subtract: SUBPD xmm, xmm (66 0F 5C).
void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed float subtract: SUBPS xmm, xmm (0F 5C).
void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBPD with explicit vector length (VEX.66 0F 5C, register form).
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBPS with explicit vector length; continues on the next source line.
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F,
// Continuation of vsubps(XMM, XMM, XMM, int) from the previous line.
&attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBPD with a memory source (VEX.66 0F 5C).
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBPS with a memory source (VEX 0F 5C).
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// Packed double multiply: MULPD xmm, xmm (66 0F 59).
void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed double multiply from memory: MULPD xmm, m128 (66 0F 59).
void Assembler::mulpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// Packed float multiply: MULPS xmm, xmm (0F 59).
void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULPD with explicit vector length (VEX.66 0F 59, register form).
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULPS with explicit vector length (VEX 0F 59, register form).
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULPD with a memory source; continues on the next source line.
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(),
// Continuation of vmulpd(XMM, XMM, Address, int) from the previous line.
"");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULPS with a memory source (VEX 0F 59).
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// Packed fused multiply-add: VFMADD231PD (VEX.66 0F38 B8, W=1).
void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed fused multiply-add: VFMADD231PS (VEX.66 0F38 B8, W=0).
void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VFMADD231PD with a memory multiplicand.
void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2);
}

// VFMADD231PS with a memory multiplicand.
void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(VM_Version::supports_fma(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src2);
}

// Packed double divide: DIVPD xmm, xmm (66 0F 5E).
void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Packed float divide: DIVPS xmm, xmm (0F 5E); continues on the next line.
void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */
// Continuation of divps(XMMRegister, XMMRegister) from the previous line.
false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVPD with explicit vector length (VEX.66 0F 5E, register form).
void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVPS with explicit vector length (VEX 0F 5E, register form).
void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVPD with a memory divisor (VEX.66 0F 5E).
void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVPS with a memory divisor (VEX 0F 5E).
void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// Packed double square root: VSQRTPD (VEX.66 0F 51, register form).
void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSQRTPD with a memory source (VEX.66 0F 51).
void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

// Packed float square root: VSQRTPS (VEX 0F 51); the remainder of this
// definition lies beyond this chunk.
void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x51); 5994 emit_int8((unsigned char)(0xC0 | encode)); 5995 } 5996 5997 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) { 5998 assert(VM_Version::supports_avx(), ""); 5999 InstructionMark im(this); 6000 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6001 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6002 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6003 emit_int8(0x51); 6004 emit_operand(dst, src); 6005 } 6006 6007 void Assembler::andpd(XMMRegister dst, XMMRegister src) { 6008 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6009 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6010 attributes.set_rex_vex_w_reverted(); 6011 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6012 emit_int8(0x54); 6013 emit_int8((unsigned char)(0xC0 | encode)); 6014 } 6015 6016 void Assembler::andps(XMMRegister dst, XMMRegister src) { 6017 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6018 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6019 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6020 emit_int8(0x54); 6021 emit_int8((unsigned char)(0xC0 | encode)); 6022 } 6023 6024 void Assembler::andps(XMMRegister dst, Address src) { 6025 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6026 InstructionMark im(this); 6027 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6028 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6029 simd_prefix(dst, dst, src, VEX_SIMD_NONE, 
VEX_OPCODE_0F, &attributes); 6030 emit_int8(0x54); 6031 emit_operand(dst, src); 6032 } 6033 6034 void Assembler::andpd(XMMRegister dst, Address src) { 6035 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6036 InstructionMark im(this); 6037 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6038 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6039 attributes.set_rex_vex_w_reverted(); 6040 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6041 emit_int8(0x54); 6042 emit_operand(dst, src); 6043 } 6044 6045 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6046 assert(VM_Version::supports_avx(), ""); 6047 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6048 attributes.set_rex_vex_w_reverted(); 6049 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6050 emit_int8(0x54); 6051 emit_int8((unsigned char)(0xC0 | encode)); 6052 } 6053 6054 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6055 assert(VM_Version::supports_avx(), ""); 6056 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6057 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6058 emit_int8(0x54); 6059 emit_int8((unsigned char)(0xC0 | encode)); 6060 } 6061 6062 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6063 assert(VM_Version::supports_avx(), ""); 6064 InstructionMark im(this); 6065 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ 
_legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6066 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6067 attributes.set_rex_vex_w_reverted(); 6068 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6069 emit_int8(0x54); 6070 emit_operand(dst, src); 6071 } 6072 6073 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6074 assert(VM_Version::supports_avx(), ""); 6075 InstructionMark im(this); 6076 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6077 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6078 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6079 emit_int8(0x54); 6080 emit_operand(dst, src); 6081 } 6082 6083 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) { 6084 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6085 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6086 attributes.set_rex_vex_w_reverted(); 6087 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6088 emit_int8(0x15); 6089 emit_int8((unsigned char)(0xC0 | encode)); 6090 } 6091 6092 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { 6093 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6094 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6095 attributes.set_rex_vex_w_reverted(); 6096 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6097 emit_int8(0x14); 6098 emit_int8((unsigned char)(0xC0 | encode)); 6099 } 6100 6101 void Assembler::xorpd(XMMRegister dst, 
XMMRegister src) { 6102 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6103 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6104 attributes.set_rex_vex_w_reverted(); 6105 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6106 emit_int8(0x57); 6107 emit_int8((unsigned char)(0xC0 | encode)); 6108 } 6109 6110 void Assembler::xorps(XMMRegister dst, XMMRegister src) { 6111 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6112 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6113 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6114 emit_int8(0x57); 6115 emit_int8((unsigned char)(0xC0 | encode)); 6116 } 6117 6118 void Assembler::xorpd(XMMRegister dst, Address src) { 6119 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6120 InstructionMark im(this); 6121 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6122 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6123 attributes.set_rex_vex_w_reverted(); 6124 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6125 emit_int8(0x57); 6126 emit_operand(dst, src); 6127 } 6128 6129 void Assembler::xorps(XMMRegister dst, Address src) { 6130 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6131 InstructionMark im(this); 6132 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6133 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6134 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6135 emit_int8(0x57); 6136 emit_operand(dst, src); 
6137 } 6138 6139 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6140 assert(VM_Version::supports_avx(), ""); 6141 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6142 attributes.set_rex_vex_w_reverted(); 6143 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6144 emit_int8(0x57); 6145 emit_int8((unsigned char)(0xC0 | encode)); 6146 } 6147 6148 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6149 assert(VM_Version::supports_avx(), ""); 6150 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6151 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6152 emit_int8(0x57); 6153 emit_int8((unsigned char)(0xC0 | encode)); 6154 } 6155 6156 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6157 assert(VM_Version::supports_avx(), ""); 6158 InstructionMark im(this); 6159 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6160 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6161 attributes.set_rex_vex_w_reverted(); 6162 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6163 emit_int8(0x57); 6164 emit_operand(dst, src); 6165 } 6166 6167 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6168 assert(VM_Version::supports_avx(), ""); 6169 InstructionMark im(this); 6170 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* 
uses_vl */ true); 6171 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6172 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6173 emit_int8(0x57); 6174 emit_operand(dst, src); 6175 } 6176 6177 // Integer vector arithmetic 6178 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6179 assert(VM_Version::supports_avx() && (vector_len == 0) || 6180 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 6181 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 6182 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6183 emit_int8(0x01); 6184 emit_int8((unsigned char)(0xC0 | encode)); 6185 } 6186 6187 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6188 assert(VM_Version::supports_avx() && (vector_len == 0) || 6189 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 6190 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6191 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6192 emit_int8(0x02); 6193 emit_int8((unsigned char)(0xC0 | encode)); 6194 } 6195 6196 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 6197 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6198 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6199 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6200 emit_int8((unsigned char)0xFC); 6201 emit_int8((unsigned char)(0xC0 | encode)); 6202 } 6203 6204 void Assembler::paddw(XMMRegister dst, 
XMMRegister src) { 6205 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6206 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6207 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6208 emit_int8((unsigned char)0xFD); 6209 emit_int8((unsigned char)(0xC0 | encode)); 6210 } 6211 6212 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 6213 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6214 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6215 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6216 emit_int8((unsigned char)0xFE); 6217 emit_int8((unsigned char)(0xC0 | encode)); 6218 } 6219 6220 void Assembler::paddd(XMMRegister dst, Address src) { 6221 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6222 InstructionMark im(this); 6223 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6224 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6225 emit_int8((unsigned char)0xFE); 6226 emit_operand(dst, src); 6227 } 6228 6229 void Assembler::paddq(XMMRegister dst, XMMRegister src) { 6230 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6231 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6232 attributes.set_rex_vex_w_reverted(); 6233 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6234 emit_int8((unsigned char)0xD4); 6235 emit_int8((unsigned char)(0xC0 | encode)); 6236 } 6237 6238 void Assembler::phaddw(XMMRegister dst, XMMRegister src) { 6239 assert(VM_Version::supports_sse3(), ""); 6240 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ 
true, /* no_mask_reg */ true, /* uses_vl */ false); 6241 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6242 emit_int8(0x01); 6243 emit_int8((unsigned char)(0xC0 | encode)); 6244 } 6245 6246 void Assembler::phaddd(XMMRegister dst, XMMRegister src) { 6247 assert(VM_Version::supports_sse3(), ""); 6248 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6249 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6250 emit_int8(0x02); 6251 emit_int8((unsigned char)(0xC0 | encode)); 6252 } 6253 6254 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6255 assert(UseAVX > 0, "requires some form of AVX"); 6256 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6257 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6258 emit_int8((unsigned char)0xFC); 6259 emit_int8((unsigned char)(0xC0 | encode)); 6260 } 6261 6262 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6263 assert(UseAVX > 0, "requires some form of AVX"); 6264 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6265 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6266 emit_int8((unsigned char)0xFD); 6267 emit_int8((unsigned char)(0xC0 | encode)); 6268 } 6269 6270 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6271 assert(UseAVX > 0, "requires some form of AVX"); 6272 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6273 
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6274 emit_int8((unsigned char)0xFE); 6275 emit_int8((unsigned char)(0xC0 | encode)); 6276 } 6277 6278 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6279 assert(UseAVX > 0, "requires some form of AVX"); 6280 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6281 attributes.set_rex_vex_w_reverted(); 6282 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6283 emit_int8((unsigned char)0xD4); 6284 emit_int8((unsigned char)(0xC0 | encode)); 6285 } 6286 6287 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6288 assert(UseAVX > 0, "requires some form of AVX"); 6289 InstructionMark im(this); 6290 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6291 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6292 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6293 emit_int8((unsigned char)0xFC); 6294 emit_operand(dst, src); 6295 } 6296 6297 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6298 assert(UseAVX > 0, "requires some form of AVX"); 6299 InstructionMark im(this); 6300 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6301 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6302 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6303 emit_int8((unsigned char)0xFD); 6304 emit_operand(dst, src); 6305 } 
6306 6307 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6308 assert(UseAVX > 0, "requires some form of AVX"); 6309 InstructionMark im(this); 6310 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6311 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6312 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6313 emit_int8((unsigned char)0xFE); 6314 emit_operand(dst, src); 6315 } 6316 6317 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6318 assert(UseAVX > 0, "requires some form of AVX"); 6319 InstructionMark im(this); 6320 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6321 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6322 attributes.set_rex_vex_w_reverted(); 6323 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6324 emit_int8((unsigned char)0xD4); 6325 emit_operand(dst, src); 6326 } 6327 6328 void Assembler::psubb(XMMRegister dst, XMMRegister src) { 6329 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6330 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6331 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6332 emit_int8((unsigned char)0xF8); 6333 emit_int8((unsigned char)(0xC0 | encode)); 6334 } 6335 6336 void Assembler::psubw(XMMRegister dst, XMMRegister src) { 6337 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6338 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6339 int encode = 
simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6340 emit_int8((unsigned char)0xF9); 6341 emit_int8((unsigned char)(0xC0 | encode)); 6342 } 6343 6344 void Assembler::psubd(XMMRegister dst, XMMRegister src) { 6345 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6346 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6347 emit_int8((unsigned char)0xFA); 6348 emit_int8((unsigned char)(0xC0 | encode)); 6349 } 6350 6351 void Assembler::psubq(XMMRegister dst, XMMRegister src) { 6352 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6353 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6354 attributes.set_rex_vex_w_reverted(); 6355 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6356 emit_int8((unsigned char)0xFB); 6357 emit_int8((unsigned char)(0xC0 | encode)); 6358 } 6359 6360 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6361 assert(UseAVX > 0, "requires some form of AVX"); 6362 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6363 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6364 emit_int8((unsigned char)0xF8); 6365 emit_int8((unsigned char)(0xC0 | encode)); 6366 } 6367 6368 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6369 assert(UseAVX > 0, "requires some form of AVX"); 6370 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6371 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), 
VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6372 emit_int8((unsigned char)0xF9); 6373 emit_int8((unsigned char)(0xC0 | encode)); 6374 } 6375 6376 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6377 assert(UseAVX > 0, "requires some form of AVX"); 6378 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6379 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6380 emit_int8((unsigned char)0xFA); 6381 emit_int8((unsigned char)(0xC0 | encode)); 6382 } 6383 6384 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6385 assert(UseAVX > 0, "requires some form of AVX"); 6386 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6387 attributes.set_rex_vex_w_reverted(); 6388 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6389 emit_int8((unsigned char)0xFB); 6390 emit_int8((unsigned char)(0xC0 | encode)); 6391 } 6392 6393 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6394 assert(UseAVX > 0, "requires some form of AVX"); 6395 InstructionMark im(this); 6396 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6397 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6398 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6399 emit_int8((unsigned char)0xF8); 6400 emit_operand(dst, src); 6401 } 6402 6403 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6404 assert(UseAVX > 0, "requires some form of AVX"); 6405 
InstructionMark im(this); 6406 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6407 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6408 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6409 emit_int8((unsigned char)0xF9); 6410 emit_operand(dst, src); 6411 } 6412 6413 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6414 assert(UseAVX > 0, "requires some form of AVX"); 6415 InstructionMark im(this); 6416 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6417 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6418 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6419 emit_int8((unsigned char)0xFA); 6420 emit_operand(dst, src); 6421 } 6422 6423 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6424 assert(UseAVX > 0, "requires some form of AVX"); 6425 InstructionMark im(this); 6426 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6427 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6428 attributes.set_rex_vex_w_reverted(); 6429 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6430 emit_int8((unsigned char)0xFB); 6431 emit_operand(dst, src); 6432 } 6433 6434 void Assembler::pmullw(XMMRegister dst, XMMRegister src) { 6435 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6436 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6437 int encode = 
simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6438 emit_int8((unsigned char)0xD5); 6439 emit_int8((unsigned char)(0xC0 | encode)); 6440 } 6441 6442 void Assembler::pmulld(XMMRegister dst, XMMRegister src) { 6443 assert(VM_Version::supports_sse4_1(), ""); 6444 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6445 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6446 emit_int8(0x40); 6447 emit_int8((unsigned char)(0xC0 | encode)); 6448 } 6449 6450 void Assembler::pmuludq(XMMRegister dst, XMMRegister src) { 6451 assert(VM_Version::supports_sse2(), ""); 6452 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6453 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6454 emit_int8((unsigned char)(0xF4)); 6455 emit_int8((unsigned char)(0xC0 | encode)); 6456 } 6457 6458 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6459 assert(UseAVX > 0, "requires some form of AVX"); 6460 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6461 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6462 emit_int8((unsigned char)0xD5); 6463 emit_int8((unsigned char)(0xC0 | encode)); 6464 } 6465 6466 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6467 assert(UseAVX > 0, "requires some form of AVX"); 6468 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6469 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6470 
emit_int8(0x40); 6471 emit_int8((unsigned char)(0xC0 | encode)); 6472 } 6473 6474 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6475 assert(UseAVX > 2, "requires some form of EVEX"); 6476 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6477 attributes.set_is_evex_instruction(); 6478 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6479 emit_int8(0x40); 6480 emit_int8((unsigned char)(0xC0 | encode)); 6481 } 6482 6483 void Assembler::vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6484 assert(UseAVX > 0, "requires some form of AVX"); 6485 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6486 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6487 emit_int8((unsigned char)(0xF4)); 6488 emit_int8((unsigned char)(0xC0 | encode)); 6489 } 6490 6491 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6492 assert(UseAVX > 0, "requires some form of AVX"); 6493 InstructionMark im(this); 6494 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6495 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 6496 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6497 emit_int8((unsigned char)0xD5); 6498 emit_operand(dst, src); 6499 } 6500 6501 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6502 assert(UseAVX > 0, "requires some form of AVX"); 6503 InstructionMark im(this); 6504 InstructionAttr attributes(vector_len, 
/* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6505 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 6506 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6507 emit_int8(0x40); 6508 emit_operand(dst, src); 6509 } 6510 6511 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 6512 assert(UseAVX > 2, "requires some form of EVEX"); 6513 InstructionMark im(this); 6514 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); 6515 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 6516 attributes.set_is_evex_instruction(); 6517 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6518 emit_int8(0x40); 6519 emit_operand(dst, src); 6520 } 6521 6522 // Min, max 6523 void Assembler::pminsb(XMMRegister dst, XMMRegister src) { 6524 assert(VM_Version::supports_sse4_1(), ""); 6525 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6526 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6527 emit_int8(0x38); 6528 emit_int8((unsigned char)(0xC0 | encode)); 6529 } 6530 6531 void Assembler::vpminsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6532 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 6533 (vector_len == AVX_256bit ? 
VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 6534 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6535 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6536 emit_int8(0x38); 6537 emit_int8((unsigned char)(0xC0 | encode)); 6538 } 6539 6540 void Assembler::pminsw(XMMRegister dst, XMMRegister src) { 6541 assert(VM_Version::supports_sse2(), ""); 6542 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6543 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6544 emit_int8((unsigned char)0xEA); 6545 emit_int8((unsigned char)(0xC0 | encode)); 6546 } 6547 6548 void Assembler::vpminsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6549 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 6550 (vector_len == AVX_256bit ? 
VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 6551 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6552 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6553 emit_int8((unsigned char)0xEA); 6554 emit_int8((unsigned char)(0xC0 | encode)); 6555 } 6556 6557 void Assembler::pminsd(XMMRegister dst, XMMRegister src) { 6558 assert(VM_Version::supports_sse4_1(), ""); 6559 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 6560 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6561 emit_int8(0x39); 6562 emit_int8((unsigned char)(0xC0 | encode)); 6563 } 6564 6565 void Assembler::vpminsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6566 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 6567 (vector_len == AVX_256bit ? 
VM_Version::supports_avx2() : VM_Version::supports_evex()), ""); 6568 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 6569 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6570 emit_int8(0x39); 6571 emit_int8((unsigned char)(0xC0 | encode)); 6572 } 6573 6574 void Assembler::vpminsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6575 assert(UseAVX > 2, "requires AVX512F"); 6576 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6577 attributes.set_is_evex_instruction(); 6578 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6579 emit_int8(0x39); 6580 emit_int8((unsigned char)(0xC0 | encode)); 6581 } 6582 6583 void Assembler::minps(XMMRegister dst, XMMRegister src) { 6584 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6585 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 6586 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6587 emit_int8(0x5D); 6588 emit_int8((unsigned char)(0xC0 | encode)); 6589 } 6590 void Assembler::vminps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6591 assert(vector_len >= AVX_512bit ? 
VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 6592 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6593 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6594 emit_int8(0x5D); 6595 emit_int8((unsigned char)(0xC0 | encode)); 6596 } 6597 6598 void Assembler::minpd(XMMRegister dst, XMMRegister src) { 6599 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6600 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 6601 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6602 emit_int8(0x5D); 6603 emit_int8((unsigned char)(0xC0 | encode)); 6604 } 6605 void Assembler::vminpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6606 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 6607 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6608 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6609 emit_int8(0x5D); 6610 emit_int8((unsigned char)(0xC0 | encode)); 6611 } 6612 6613 void Assembler::pmaxsb(XMMRegister dst, XMMRegister src) { 6614 assert(VM_Version::supports_sse4_1(), ""); 6615 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6616 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6617 emit_int8(0x3C); 6618 emit_int8((unsigned char)(0xC0 | encode)); 6619 } 6620 6621 void Assembler::vpmaxsb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6622 assert(vector_len == AVX_128bit ? 
VM_Version::supports_avx() : 6623 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 6624 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6625 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6626 emit_int8(0x3C); 6627 emit_int8((unsigned char)(0xC0 | encode)); 6628 } 6629 6630 void Assembler::pmaxsw(XMMRegister dst, XMMRegister src) { 6631 assert(VM_Version::supports_sse2(), ""); 6632 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 6633 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6634 emit_int8((unsigned char)0xEE); 6635 emit_int8((unsigned char)(0xC0 | encode)); 6636 } 6637 6638 void Assembler::vpmaxsw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6639 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 6640 (vector_len == AVX_256bit ? 
VM_Version::supports_avx2() : VM_Version::supports_avx512bw()), ""); 6641 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6642 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6643 emit_int8((unsigned char)0xEE); 6644 emit_int8((unsigned char)(0xC0 | encode)); 6645 } 6646 6647 void Assembler::pmaxsd(XMMRegister dst, XMMRegister src) { 6648 assert(VM_Version::supports_sse4_1(), ""); 6649 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false); 6650 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6651 emit_int8(0x3D); 6652 emit_int8((unsigned char)(0xC0 | encode)); 6653 } 6654 6655 void Assembler::vpmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6656 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 6657 (vector_len == AVX_256bit ? 
VM_Version::supports_avx2() : VM_Version::supports_evex()), ""); 6658 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true); 6659 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6660 emit_int8(0x3D); 6661 emit_int8((unsigned char)(0xC0 | encode)); 6662 } 6663 6664 void Assembler::vpmaxsq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6665 assert(UseAVX > 2, "requires AVX512F"); 6666 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6667 attributes.set_is_evex_instruction(); 6668 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6669 emit_int8(0x3D); 6670 emit_int8((unsigned char)(0xC0 | encode)); 6671 } 6672 6673 void Assembler::maxps(XMMRegister dst, XMMRegister src) { 6674 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6675 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 6676 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6677 emit_int8(0x5F); 6678 emit_int8((unsigned char)(0xC0 | encode)); 6679 } 6680 6681 void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6682 assert(vector_len >= AVX_512bit ? 
VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 6683 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6684 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6685 emit_int8(0x5F); 6686 emit_int8((unsigned char)(0xC0 | encode)); 6687 } 6688 6689 void Assembler::maxpd(XMMRegister dst, XMMRegister src) { 6690 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6691 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 6692 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6693 emit_int8(0x5F); 6694 emit_int8((unsigned char)(0xC0 | encode)); 6695 } 6696 6697 void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6698 assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), ""); 6699 InstructionAttr attributes(vector_len, /* vex_w */true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6700 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6701 emit_int8(0x5F); 6702 emit_int8((unsigned char)(0xC0 | encode)); 6703 } 6704 6705 // Shift packed integers left by specified number of bits. 
6706 void Assembler::psllw(XMMRegister dst, int shift) { 6707 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6708 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6709 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 6710 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6711 emit_int8(0x71); 6712 emit_int8((unsigned char)(0xC0 | encode)); 6713 emit_int8(shift & 0xFF); 6714 } 6715 6716 void Assembler::pslld(XMMRegister dst, int shift) { 6717 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6718 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6719 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 6720 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6721 emit_int8(0x72); 6722 emit_int8((unsigned char)(0xC0 | encode)); 6723 emit_int8(shift & 0xFF); 6724 } 6725 6726 void Assembler::psllq(XMMRegister dst, int shift) { 6727 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6728 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6729 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 6730 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6731 emit_int8(0x73); 6732 emit_int8((unsigned char)(0xC0 | encode)); 6733 emit_int8(shift & 0xFF); 6734 } 6735 6736 void Assembler::psllw(XMMRegister dst, XMMRegister shift) { 6737 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6738 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6739 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6740 emit_int8((unsigned char)0xF1); 6741 emit_int8((unsigned char)(0xC0 | encode)); 6742 } 6743 6744 void 
Assembler::pslld(XMMRegister dst, XMMRegister shift) { 6745 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6746 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6747 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6748 emit_int8((unsigned char)0xF2); 6749 emit_int8((unsigned char)(0xC0 | encode)); 6750 } 6751 6752 void Assembler::psllq(XMMRegister dst, XMMRegister shift) { 6753 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6754 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6755 attributes.set_rex_vex_w_reverted(); 6756 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6757 emit_int8((unsigned char)0xF3); 6758 emit_int8((unsigned char)(0xC0 | encode)); 6759 } 6760 6761 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6762 assert(UseAVX > 0, "requires some form of AVX"); 6763 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6764 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 6765 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6766 emit_int8(0x71); 6767 emit_int8((unsigned char)(0xC0 | encode)); 6768 emit_int8(shift & 0xFF); 6769 } 6770 6771 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6772 assert(UseAVX > 0, "requires some form of AVX"); 6773 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6774 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6775 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 6776 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), 
src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6777 emit_int8(0x72); 6778 emit_int8((unsigned char)(0xC0 | encode)); 6779 emit_int8(shift & 0xFF); 6780 } 6781 6782 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6783 assert(UseAVX > 0, "requires some form of AVX"); 6784 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6785 attributes.set_rex_vex_w_reverted(); 6786 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 6787 int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6788 emit_int8(0x73); 6789 emit_int8((unsigned char)(0xC0 | encode)); 6790 emit_int8(shift & 0xFF); 6791 } 6792 6793 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6794 assert(UseAVX > 0, "requires some form of AVX"); 6795 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6796 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6797 emit_int8((unsigned char)0xF1); 6798 emit_int8((unsigned char)(0xC0 | encode)); 6799 } 6800 6801 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6802 assert(UseAVX > 0, "requires some form of AVX"); 6803 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6804 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6805 emit_int8((unsigned char)0xF2); 6806 emit_int8((unsigned char)(0xC0 | encode)); 6807 } 6808 6809 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6810 assert(UseAVX > 0, "requires some form 
of AVX"); 6811 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6812 attributes.set_rex_vex_w_reverted(); 6813 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6814 emit_int8((unsigned char)0xF3); 6815 emit_int8((unsigned char)(0xC0 | encode)); 6816 } 6817 6818 // Shift packed integers logically right by specified number of bits. 6819 void Assembler::psrlw(XMMRegister dst, int shift) { 6820 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6821 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6822 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 6823 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6824 emit_int8(0x71); 6825 emit_int8((unsigned char)(0xC0 | encode)); 6826 emit_int8(shift & 0xFF); 6827 } 6828 6829 void Assembler::psrld(XMMRegister dst, int shift) { 6830 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6831 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6832 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 6833 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6834 emit_int8(0x72); 6835 emit_int8((unsigned char)(0xC0 | encode)); 6836 emit_int8(shift & 0xFF); 6837 } 6838 6839 void Assembler::psrlq(XMMRegister dst, int shift) { 6840 // Do not confuse it with psrldq SSE2 instruction which 6841 // shifts 128 bit value in xmm register by number of bytes. 
6842 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6843 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6844 attributes.set_rex_vex_w_reverted(); 6845 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 6846 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6847 emit_int8(0x73); 6848 emit_int8((unsigned char)(0xC0 | encode)); 6849 emit_int8(shift & 0xFF); 6850 } 6851 6852 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) { 6853 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6854 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6855 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6856 emit_int8((unsigned char)0xD1); 6857 emit_int8((unsigned char)(0xC0 | encode)); 6858 } 6859 6860 void Assembler::psrld(XMMRegister dst, XMMRegister shift) { 6861 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6862 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6863 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6864 emit_int8((unsigned char)0xD2); 6865 emit_int8((unsigned char)(0xC0 | encode)); 6866 } 6867 6868 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) { 6869 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6870 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6871 attributes.set_rex_vex_w_reverted(); 6872 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6873 emit_int8((unsigned char)0xD3); 6874 emit_int8((unsigned char)(0xC0 | encode)); 6875 } 6876 6877 void Assembler::vpsrlw(XMMRegister dst, XMMRegister 
src, int shift, int vector_len) { 6878 assert(UseAVX > 0, "requires some form of AVX"); 6879 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6880 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 6881 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6882 emit_int8(0x71); 6883 emit_int8((unsigned char)(0xC0 | encode)); 6884 emit_int8(shift & 0xFF); 6885 } 6886 6887 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6888 assert(UseAVX > 0, "requires some form of AVX"); 6889 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6890 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 6891 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6892 emit_int8(0x72); 6893 emit_int8((unsigned char)(0xC0 | encode)); 6894 emit_int8(shift & 0xFF); 6895 } 6896 6897 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6898 assert(UseAVX > 0, "requires some form of AVX"); 6899 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6900 attributes.set_rex_vex_w_reverted(); 6901 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 6902 int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6903 emit_int8(0x73); 6904 emit_int8((unsigned char)(0xC0 | encode)); 6905 emit_int8(shift & 0xFF); 6906 } 6907 6908 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6909 assert(UseAVX > 0, "requires some form of AVX"); 6910 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* 
no_mask_reg */ true, /* uses_vl */ true); 6911 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6912 emit_int8((unsigned char)0xD1); 6913 emit_int8((unsigned char)(0xC0 | encode)); 6914 } 6915 6916 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6917 assert(UseAVX > 0, "requires some form of AVX"); 6918 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6919 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6920 emit_int8((unsigned char)0xD2); 6921 emit_int8((unsigned char)(0xC0 | encode)); 6922 } 6923 6924 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 6925 assert(UseAVX > 0, "requires some form of AVX"); 6926 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6927 attributes.set_rex_vex_w_reverted(); 6928 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6929 emit_int8((unsigned char)0xD3); 6930 emit_int8((unsigned char)(0xC0 | encode)); 6931 } 6932 6933 void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 6934 assert(VM_Version::supports_avx512bw(), ""); 6935 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6936 attributes.set_is_evex_instruction(); 6937 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6938 emit_int8(0x10); 6939 emit_int8((unsigned char)(0xC0 | encode)); 6940 } 6941 6942 void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, 
int vector_len) { 6943 assert(VM_Version::supports_avx512bw(), ""); 6944 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6945 attributes.set_is_evex_instruction(); 6946 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6947 emit_int8(0x12); 6948 emit_int8((unsigned char)(0xC0 | encode)); 6949 } 6950 6951 // Shift packed integers arithmetically right by specified number of bits. 6952 void Assembler::psraw(XMMRegister dst, int shift) { 6953 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6954 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6955 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 6956 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6957 emit_int8(0x71); 6958 emit_int8((unsigned char)(0xC0 | encode)); 6959 emit_int8(shift & 0xFF); 6960 } 6961 6962 void Assembler::psrad(XMMRegister dst, int shift) { 6963 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6964 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6965 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 6966 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6967 emit_int8(0x72); 6968 emit_int8((unsigned char)(0xC0 | encode)); 6969 emit_int8(shift & 0xFF); 6970 } 6971 6972 void Assembler::psraw(XMMRegister dst, XMMRegister shift) { 6973 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6974 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6975 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6976 emit_int8((unsigned char)0xE1); 6977 emit_int8((unsigned char)(0xC0 
| encode)); 6978 } 6979 6980 void Assembler::psrad(XMMRegister dst, XMMRegister shift) { 6981 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6982 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6983 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6984 emit_int8((unsigned char)0xE2); 6985 emit_int8((unsigned char)(0xC0 | encode)); 6986 } 6987 6988 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6989 assert(UseAVX > 0, "requires some form of AVX"); 6990 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6991 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 6992 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6993 emit_int8(0x71); 6994 emit_int8((unsigned char)(0xC0 | encode)); 6995 emit_int8(shift & 0xFF); 6996 } 6997 6998 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 6999 assert(UseAVX > 0, "requires some form of AVX"); 7000 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7001 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 7002 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7003 emit_int8(0x72); 7004 emit_int8((unsigned char)(0xC0 | encode)); 7005 emit_int8(shift & 0xFF); 7006 } 7007 7008 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7009 assert(UseAVX > 0, "requires some form of AVX"); 7010 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 7011 int encode = vex_prefix_and_encode(dst->encoding(), 
src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7012 emit_int8((unsigned char)0xE1); 7013 emit_int8((unsigned char)(0xC0 | encode)); 7014 } 7015 7016 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7017 assert(UseAVX > 0, "requires some form of AVX"); 7018 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7019 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7020 emit_int8((unsigned char)0xE2); 7021 emit_int8((unsigned char)(0xC0 | encode)); 7022 } 7023 7024 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 7025 assert(UseAVX > 2, "requires AVX512"); 7026 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7027 attributes.set_is_evex_instruction(); 7028 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7029 emit_int8((unsigned char)0x72); 7030 emit_int8((unsigned char)(0xC0 | encode)); 7031 emit_int8(shift & 0xFF); 7032 } 7033 7034 void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7035 assert(UseAVX > 2, "requires AVX512"); 7036 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7037 attributes.set_is_evex_instruction(); 7038 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7039 emit_int8((unsigned char)0xE2); 7040 emit_int8((unsigned char)(0xC0 | encode)); 7041 } 7042 7043 //Variable Shift packed integers logically left. 
7044 void Assembler::vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7045 assert(UseAVX > 1, "requires AVX2"); 7046 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7047 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7048 emit_int8(0x47); 7049 emit_int8((unsigned char)(0xC0 | encode)); 7050 } 7051 7052 void Assembler::vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7053 assert(UseAVX > 1, "requires AVX2"); 7054 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7055 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7056 emit_int8(0x47); 7057 emit_int8((unsigned char)(0xC0 | encode)); 7058 } 7059 7060 //Variable Shift packed integers logically right. 
7061 void Assembler::vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7062 assert(UseAVX > 1, "requires AVX2"); 7063 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7064 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7065 emit_int8(0x45); 7066 emit_int8((unsigned char)(0xC0 | encode)); 7067 } 7068 7069 void Assembler::vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7070 assert(UseAVX > 1, "requires AVX2"); 7071 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7072 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7073 emit_int8(0x45); 7074 emit_int8((unsigned char)(0xC0 | encode)); 7075 } 7076 7077 //Variable right Shift arithmetic packed integers . 
7078 void Assembler::vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7079 assert(UseAVX > 1, "requires AVX2"); 7080 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7081 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7082 emit_int8(0x46); 7083 emit_int8((unsigned char)(0xC0 | encode)); 7084 } 7085 7086 void Assembler::evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 7087 assert(UseAVX > 2, "requires AVX512"); 7088 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7089 attributes.set_is_evex_instruction(); 7090 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7091 emit_int8(0x46); 7092 emit_int8((unsigned char)(0xC0 | encode)); 7093 } 7094 7095 // logical operations packed integers 7096 void Assembler::pand(XMMRegister dst, XMMRegister src) { 7097 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7098 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7099 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7100 emit_int8((unsigned char)0xDB); 7101 emit_int8((unsigned char)(0xC0 | encode)); 7102 } 7103 7104 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7105 assert(UseAVX > 0, "requires some form of AVX"); 7106 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7107 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7108 emit_int8((unsigned char)0xDB); 7109 
emit_int8((unsigned char)(0xC0 | encode)); 7110 } 7111 7112 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7113 assert(UseAVX > 0, "requires some form of AVX"); 7114 InstructionMark im(this); 7115 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7116 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7117 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7118 emit_int8((unsigned char)0xDB); 7119 emit_operand(dst, src); 7120 } 7121 7122 void Assembler::evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7123 assert(VM_Version::supports_evex(), ""); 7124 // Encoding: EVEX.NDS.XXX.66.0F.W0 DB /r 7125 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7126 attributes.set_is_evex_instruction(); 7127 attributes.set_embedded_opmask_register_specifier(mask); 7128 if (merge) { 7129 attributes.reset_is_clear_context(); 7130 } 7131 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7132 emit_int8((unsigned char)0xDB); 7133 emit_int8((unsigned char)(0xC0 | encode)); 7134 } 7135 7136 void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7137 assert(VM_Version::supports_evex(), ""); 7138 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7139 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7140 emit_int8((unsigned char)0xDB); 7141 emit_int8((unsigned char)(0xC0 | encode)); 7142 } 7143 7144 void Assembler::pandn(XMMRegister dst, XMMRegister src) { 7145 
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7146 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7147 attributes.set_rex_vex_w_reverted(); 7148 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7149 emit_int8((unsigned char)0xDF); 7150 emit_int8((unsigned char)(0xC0 | encode)); 7151 } 7152 7153 void Assembler::por(XMMRegister dst, XMMRegister src) { 7154 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7155 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7156 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7157 emit_int8((unsigned char)0xEB); 7158 emit_int8((unsigned char)(0xC0 | encode)); 7159 } 7160 7161 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7162 assert(UseAVX > 0, "requires some form of AVX"); 7163 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7164 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7165 emit_int8((unsigned char)0xEB); 7166 emit_int8((unsigned char)(0xC0 | encode)); 7167 } 7168 7169 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7170 assert(UseAVX > 0, "requires some form of AVX"); 7171 InstructionMark im(this); 7172 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7173 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7174 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7175 emit_int8((unsigned char)0xEB); 7176 emit_operand(dst, src); 7177 } 7178 7179 
void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7180 assert(VM_Version::supports_evex(), ""); 7181 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7182 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7183 emit_int8((unsigned char)0xEB); 7184 emit_int8((unsigned char)(0xC0 | encode)); 7185 } 7186 7187 7188 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7189 assert(VM_Version::supports_evex(), ""); 7190 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r 7191 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7192 attributes.set_is_evex_instruction(); 7193 attributes.set_embedded_opmask_register_specifier(mask); 7194 if (merge) { 7195 attributes.reset_is_clear_context(); 7196 } 7197 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7198 emit_int8((unsigned char)0xEB); 7199 emit_int8((unsigned char)(0xC0 | encode)); 7200 } 7201 7202 void Assembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 7203 assert(VM_Version::supports_evex(), ""); 7204 // Encoding: EVEX.NDS.XXX.66.0F.W0 EB /r 7205 InstructionMark im(this); 7206 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7207 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit); 7208 attributes.set_is_evex_instruction(); 7209 attributes.set_embedded_opmask_register_specifier(mask); 7210 if (merge) { 7211 attributes.reset_is_clear_context(); 7212 } 7213 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, 
&attributes); 7214 emit_int8((unsigned char)0xEB); 7215 emit_operand(dst, src); 7216 } 7217 7218 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 7219 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7220 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7221 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7222 emit_int8((unsigned char)0xEF); 7223 emit_int8((unsigned char)(0xC0 | encode)); 7224 } 7225 7226 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7227 assert(UseAVX > 0, "requires some form of AVX"); 7228 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7229 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7230 emit_int8((unsigned char)0xEF); 7231 emit_int8((unsigned char)(0xC0 | encode)); 7232 } 7233 7234 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7235 assert(UseAVX > 0, "requires some form of AVX"); 7236 InstructionMark im(this); 7237 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7238 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 7239 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7240 emit_int8((unsigned char)0xEF); 7241 emit_operand(dst, src); 7242 } 7243 7244 void Assembler::vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7245 assert(UseAVX > 2, "requires some form of EVEX"); 7246 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7247 attributes.set_rex_vex_w_reverted(); 7248 
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7249 emit_int8((unsigned char)0xEF); 7250 emit_int8((unsigned char)(0xC0 | encode)); 7251 } 7252 7253 void Assembler::evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 7254 assert(VM_Version::supports_evex(), ""); 7255 // Encoding: EVEX.NDS.XXX.66.0F.W0 EF /r 7256 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 7257 attributes.set_is_evex_instruction(); 7258 attributes.set_embedded_opmask_register_specifier(mask); 7259 if (merge) { 7260 attributes.reset_is_clear_context(); 7261 } 7262 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7263 emit_int8((unsigned char)0xEF); 7264 emit_int8((unsigned char)(0xC0 | encode)); 7265 } 7266 7267 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 7268 assert(VM_Version::supports_evex(), "requires EVEX support"); 7269 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7270 attributes.set_is_evex_instruction(); 7271 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7272 emit_int8((unsigned char)0xEF); 7273 emit_int8((unsigned char)(0xC0 | encode)); 7274 } 7275 7276 void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 7277 assert(VM_Version::supports_evex(), "requires EVEX support"); 7278 assert(dst != xnoreg, "sanity"); 7279 InstructionMark im(this); 7280 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 7281 attributes.set_is_evex_instruction(); 7282 attributes.set_address_attributes(/* 
tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 7283 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7284 emit_int8((unsigned char)0xEF); 7285 emit_operand(dst, src); 7286 } 7287 7288 // vinserti forms 7289 7290 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7291 assert(VM_Version::supports_avx2(), ""); 7292 assert(imm8 <= 0x01, "imm8: %u", imm8); 7293 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; 7294 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7295 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7296 emit_int8(0x38); 7297 emit_int8((unsigned char)(0xC0 | encode)); 7298 // 0x00 - insert into lower 128 bits 7299 // 0x01 - insert into upper 128 bits 7300 emit_int8(imm8 & 0x01); 7301 } 7302 7303 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7304 assert(VM_Version::supports_avx2(), ""); 7305 assert(dst != xnoreg, "sanity"); 7306 assert(imm8 <= 0x01, "imm8: %u", imm8); 7307 int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; 7308 InstructionMark im(this); 7309 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7310 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7311 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7312 emit_int8(0x38); 7313 emit_operand(dst, src); 7314 // 0x00 - insert into lower 128 bits 7315 // 0x01 - insert into upper 128 bits 7316 emit_int8(imm8 & 0x01); 7317 } 7318 7319 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7320 assert(VM_Version::supports_evex(), ""); 7321 assert(imm8 <= 0x03, "imm8: %u", imm8); 7322 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7323 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7324 emit_int8(0x38); 7325 emit_int8((unsigned char)(0xC0 | encode)); 7326 // 0x00 - insert into q0 128 bits (0..127) 7327 // 0x01 - insert into q1 128 bits (128..255) 7328 // 0x02 - insert into q2 128 bits (256..383) 7329 // 0x03 - insert into q3 128 bits (384..511) 7330 emit_int8(imm8 & 0x03); 7331 } 7332 7333 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7334 assert(VM_Version::supports_avx(), ""); 7335 assert(dst != xnoreg, "sanity"); 7336 assert(imm8 <= 0x03, "imm8: %u", imm8); 7337 int vector_len = VM_Version::supports_evex() ? 
AVX_512bit : AVX_256bit; 7338 InstructionMark im(this); 7339 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7340 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7341 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7342 emit_int8(0x18); 7343 emit_operand(dst, src); 7344 // 0x00 - insert into q0 128 bits (0..127) 7345 // 0x01 - insert into q1 128 bits (128..255) 7346 // 0x02 - insert into q2 128 bits (256..383) 7347 // 0x03 - insert into q3 128 bits (384..511) 7348 emit_int8(imm8 & 0x03); 7349 } 7350 7351 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7352 assert(VM_Version::supports_evex(), ""); 7353 assert(imm8 <= 0x01, "imm8: %u", imm8); 7354 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7355 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7356 emit_int8(0x3A); 7357 emit_int8((unsigned char)(0xC0 | encode)); 7358 // 0x00 - insert into lower 256 bits 7359 // 0x01 - insert into upper 256 bits 7360 emit_int8(imm8 & 0x01); 7361 } 7362 7363 7364 // vinsertf forms 7365 7366 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7367 assert(VM_Version::supports_avx(), ""); 7368 assert(imm8 <= 0x01, "imm8: %u", imm8); 7369 int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; 7370 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7371 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7372 emit_int8(0x18); 7373 emit_int8((unsigned char)(0xC0 | encode)); 7374 // 0x00 - insert into lower 128 bits 7375 // 0x01 - insert into upper 128 bits 7376 emit_int8(imm8 & 0x01); 7377 } 7378 7379 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7380 assert(VM_Version::supports_avx(), ""); 7381 assert(dst != xnoreg, "sanity"); 7382 assert(imm8 <= 0x01, "imm8: %u", imm8); 7383 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; 7384 InstructionMark im(this); 7385 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7386 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7387 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7388 emit_int8(0x18); 7389 emit_operand(dst, src); 7390 // 0x00 - insert into lower 128 bits 7391 // 0x01 - insert into upper 128 bits 7392 emit_int8(imm8 & 0x01); 7393 } 7394 7395 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7396 assert(VM_Version::supports_evex(), ""); 7397 assert(imm8 <= 0x03, "imm8: %u", imm8); 7398 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7399 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7400 emit_int8(0x18); 7401 emit_int8((unsigned char)(0xC0 | encode)); 7402 // 0x00 - insert into q0 128 bits (0..127) 7403 // 0x01 - insert into q1 128 bits (128..255) 7404 // 0x02 - insert 
into q2 128 bits (256..383) 7405 // 0x03 - insert into q3 128 bits (384..511) 7406 emit_int8(imm8 & 0x03); 7407 } 7408 7409 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7410 assert(VM_Version::supports_avx(), ""); 7411 assert(dst != xnoreg, "sanity"); 7412 assert(imm8 <= 0x03, "imm8: %u", imm8); 7413 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 7414 InstructionMark im(this); 7415 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7416 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7417 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7418 emit_int8(0x18); 7419 emit_operand(dst, src); 7420 // 0x00 - insert into q0 128 bits (0..127) 7421 // 0x01 - insert into q1 128 bits (128..255) 7422 // 0x02 - insert into q2 128 bits (256..383) 7423 // 0x03 - insert into q3 128 bits (384..511) 7424 emit_int8(imm8 & 0x03); 7425 } 7426 7427 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 7428 assert(VM_Version::supports_evex(), ""); 7429 assert(imm8 <= 0x01, "imm8: %u", imm8); 7430 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7431 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7432 emit_int8(0x1A); 7433 emit_int8((unsigned char)(0xC0 | encode)); 7434 // 0x00 - insert into lower 256 bits 7435 // 0x01 - insert into upper 256 bits 7436 emit_int8(imm8 & 0x01); 7437 } 7438 7439 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 7440 assert(VM_Version::supports_evex(), ""); 7441 assert(dst != xnoreg, "sanity"); 7442 assert(imm8 <= 0x01, "imm8: %u", imm8); 7443 InstructionMark im(this); 7444 
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7445 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); 7446 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7447 emit_int8(0x1A); 7448 emit_operand(dst, src); 7449 // 0x00 - insert into lower 256 bits 7450 // 0x01 - insert into upper 256 bits 7451 emit_int8(imm8 & 0x01); 7452 } 7453 7454 7455 // vextracti forms 7456 7457 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7458 assert(VM_Version::supports_avx(), ""); 7459 assert(imm8 <= 0x01, "imm8: %u", imm8); 7460 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; 7461 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7462 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7463 emit_int8(0x39); 7464 emit_int8((unsigned char)(0xC0 | encode)); 7465 // 0x00 - extract from lower 128 bits 7466 // 0x01 - extract from upper 128 bits 7467 emit_int8(imm8 & 0x01); 7468 } 7469 7470 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 7471 assert(VM_Version::supports_avx2(), ""); 7472 assert(src != xnoreg, "sanity"); 7473 assert(imm8 <= 0x01, "imm8: %u", imm8); 7474 int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; 7475 InstructionMark im(this); 7476 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7477 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7478 attributes.reset_is_clear_context(); 7479 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7480 emit_int8(0x39); 7481 emit_operand(src, dst); 7482 // 0x00 - extract from lower 128 bits 7483 // 0x01 - extract from upper 128 bits 7484 emit_int8(imm8 & 0x01); 7485 } 7486 7487 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7488 assert(VM_Version::supports_avx(), ""); 7489 assert(imm8 <= 0x03, "imm8: %u", imm8); 7490 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 7491 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7492 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7493 emit_int8(0x39); 7494 emit_int8((unsigned char)(0xC0 | encode)); 7495 // 0x00 - extract from bits 127:0 7496 // 0x01 - extract from bits 255:128 7497 // 0x02 - extract from bits 383:256 7498 // 0x03 - extract from bits 511:384 7499 emit_int8(imm8 & 0x03); 7500 } 7501 7502 void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) { 7503 assert(VM_Version::supports_evex(), ""); 7504 assert(src != xnoreg, "sanity"); 7505 assert(imm8 <= 0x03, "imm8: %u", imm8); 7506 InstructionMark im(this); 7507 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7508 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7509 attributes.reset_is_clear_context(); 7510 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7511 
emit_int8(0x39); 7512 emit_operand(src, dst); 7513 // 0x00 - extract from bits 127:0 7514 // 0x01 - extract from bits 255:128 7515 // 0x02 - extract from bits 383:256 7516 // 0x03 - extract from bits 511:384 7517 emit_int8(imm8 & 0x03); 7518 } 7519 7520 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7521 assert(VM_Version::supports_avx512dq(), ""); 7522 assert(imm8 <= 0x03, "imm8: %u", imm8); 7523 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7524 attributes.set_is_evex_instruction(); 7525 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7526 emit_int8(0x39); 7527 emit_int8((unsigned char)(0xC0 | encode)); 7528 // 0x00 - extract from bits 127:0 7529 // 0x01 - extract from bits 255:128 7530 // 0x02 - extract from bits 383:256 7531 // 0x03 - extract from bits 511:384 7532 emit_int8(imm8 & 0x03); 7533 } 7534 7535 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7536 assert(VM_Version::supports_evex(), ""); 7537 assert(imm8 <= 0x01, "imm8: %u", imm8); 7538 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7539 attributes.set_is_evex_instruction(); 7540 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7541 emit_int8(0x3B); 7542 emit_int8((unsigned char)(0xC0 | encode)); 7543 // 0x00 - extract from lower 256 bits 7544 // 0x01 - extract from upper 256 bits 7545 emit_int8(imm8 & 0x01); 7546 } 7547 7548 7549 // vextractf forms 7550 7551 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7552 assert(VM_Version::supports_avx(), ""); 7553 assert(imm8 <= 0x01, "imm8: %u", imm8); 7554 int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; 7555 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7556 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7557 emit_int8(0x19); 7558 emit_int8((unsigned char)(0xC0 | encode)); 7559 // 0x00 - extract from lower 128 bits 7560 // 0x01 - extract from upper 128 bits 7561 emit_int8(imm8 & 0x01); 7562 } 7563 7564 void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { 7565 assert(VM_Version::supports_avx(), ""); 7566 assert(src != xnoreg, "sanity"); 7567 assert(imm8 <= 0x01, "imm8: %u", imm8); 7568 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; 7569 InstructionMark im(this); 7570 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7571 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7572 attributes.reset_is_clear_context(); 7573 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7574 emit_int8(0x19); 7575 emit_operand(src, dst); 7576 // 0x00 - extract from lower 128 bits 7577 // 0x01 - extract from upper 128 bits 7578 emit_int8(imm8 & 0x01); 7579 } 7580 7581 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7582 assert(VM_Version::supports_avx(), ""); 7583 assert(imm8 <= 0x03, "imm8: %u", imm8); 7584 int vector_len = VM_Version::supports_evex() ? 
AVX_512bit : AVX_256bit; 7585 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7586 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7587 emit_int8(0x19); 7588 emit_int8((unsigned char)(0xC0 | encode)); 7589 // 0x00 - extract from bits 127:0 7590 // 0x01 - extract from bits 255:128 7591 // 0x02 - extract from bits 383:256 7592 // 0x03 - extract from bits 511:384 7593 emit_int8(imm8 & 0x03); 7594 } 7595 7596 void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) { 7597 assert(VM_Version::supports_evex(), ""); 7598 assert(src != xnoreg, "sanity"); 7599 assert(imm8 <= 0x03, "imm8: %u", imm8); 7600 InstructionMark im(this); 7601 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7602 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 7603 attributes.reset_is_clear_context(); 7604 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7605 emit_int8(0x19); 7606 emit_operand(src, dst); 7607 // 0x00 - extract from bits 127:0 7608 // 0x01 - extract from bits 255:128 7609 // 0x02 - extract from bits 383:256 7610 // 0x03 - extract from bits 511:384 7611 emit_int8(imm8 & 0x03); 7612 } 7613 7614 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) { 7615 assert(VM_Version::supports_avx512dq(), ""); 7616 assert(imm8 <= 0x03, "imm8: %u", imm8); 7617 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 7618 attributes.set_is_evex_instruction(); 7619 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7620 emit_int8(0x19); 7621 emit_int8((unsigned char)(0xC0 | encode)); 7622 // 0x00 - extract from bits 
127:0
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

// Extract one 256-bit (4 x 64-bit) half of a 512-bit src into dst (register form).
void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_is_evex_instruction();
  // For extracts the destination is the ModRM r/m operand, so dst and src
  // trade places in the encode call.
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}

// Store form: extract one 256-bit half of src to memory.
void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}


// legacy word/dword replicate

// Broadcast the low word of src to every word lane of dst (AVX2 VPBROADCASTW).
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Broadcast the low dword of src to every dword lane of dst (AVX2 VPBROADCASTD).
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}


// xmm/mem sourced byte/word/dword/qword replicate

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_operand(dst, src);
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src);
}

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // vex_w is always true here (supports_evex() was just asserted); W1 selects
  // the quadword form of the broadcast.
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// Broadcast a 128-bit (2 x 64-bit) lane of src across dst; 256/512-bit only.
void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}


// scalar single/double precision replicate

// duplicate single precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
}

// duplicate double precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_operand(dst, src);
}


// gpr source broadcast forms

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Same opcode as the dword form; EVEX.W=1 selects the quadword variant.
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Masked dword gather: loads dwords from src (VSIB address) into dst under mask.
void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // NOTE(review): input size is declared 64-bit for a dword gather; this only
  // affects disp8 compression scaling — confirm intent against the EVEX tuple tables.
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src);
}

// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  // imm8 selects which 64-bit halves of dst/src are multiplied.
  emit_int8((unsigned char)mask);
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}

// Vector-length carry-less multiply (VPCLMULQDQ extension).
void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
  assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}

// Zero the upper bits of all YMM registers; no-op encoding skipped on CPUs
// where it is unsupported or unnecessary.
void Assembler::vzeroupper() {
  if (VM_Version::supports_vzeroupper()) {
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8(0x77);
  }
}

#ifndef _LP64
// 32bit only pieces of the assembler

// CMP r32, imm32 with a relocated immediate (0x81 /7, register form).
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

// CMP m32, imm32 with a relocated immediate; rdi encodes the /7 opcode extension.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
// CMPXCHG8B m64 (0x0F 0xC7 /1); rcx supplies the /1 opcode-extension digit,
// it is not a real operand.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}

// Short-form DEC r32 (0x48 + reg); only legal in 32-bit mode, where
// 0x48..0x4F are not REX prefixes.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

#endif // _LP64

// 64bit typically doesn't use the x87 but needs to for the trig funcs

// In the x87 memory-operand emitters below, the GP register passed to
// emit_operand32() is not an operand: its 3-bit encoding fills the /digit
// opcode-extension field of the ModRM byte.

void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

// ST(i) <- ST(i) + ST(0)   ("a" suffix: accumulate into ST(i))
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

// Load 64-bit integer from memory and push onto the FPU stack.
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

// Load 32-bit integer from memory and push onto the FPU stack.
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

// Waiting form: FWAIT (0x9B) followed by FNINIT.
void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


// Push a copy of ST(index) onto the FPU stack.
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// Load 80-bit extended-precision value from memory.
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}

// ln(ST(0)) computed as ln(2) * log2(ST(0)) via FYL2X.
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// log10(ST(0)) computed as log10(2) * log2(ST(0)) via FYL2X.
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

// NOTE(review): emits an FWAIT (0x9B) prefix, i.e. the waiting FSTCW form,
// despite the no-wait name.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

// Store 80-bit extended-precision value to memory and pop.
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

// FPTAN pushes a 1.0 after the partial tangent; pop it with FSTP ST(0)
// so only tan(x) remains on the stack.
void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// ST(1) <- ST(1) * log2(ST(0)), then pop; building block for flog/flog10.
void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
// Emit the legacy-SSE equivalent of a VEX-encoded instruction's prefix bytes:
// optional SIMD prefix (66/F3/F2), REX (with or without W), then the 0x0F
// escape and optional second escape byte (0x38/0x3A).
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

// Register-register variant of rex_prefix(); returns the modrm encode bits.
int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


// Emit the 2- or 3-byte VEX prefix. The 3-byte form is required whenever
// B/X/W must be expressed or a two-byte opcode map (0F_38/0F_3A) is used.
void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  int vector_len = _attributes->get_vector_len();
  bool vex_w = _attributes->is_rex_vex_w();
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    // byte 1: inverted R/X/B plus the opcode-map selector (mmmmm bits).
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    // byte 2: W, inverted vvvv (nds), vector length L, and pp prefix bits.
    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    // single payload byte: inverted R, inverted vvvv, L and pp.
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){
  // EVEX 0x62 prefix
  prefix(EVEX_4bytes);
  bool vex_w = _attributes->is_rex_vex_w();
  int evex_encoding = (vex_w ? VEX_W : 0);
  // EVEX.b is not currently used for broadcast of single element or data rounding modes
  _attributes->set_evex_encoding(evex_encoding);

  // P0: byte 2, initialized to RXBR`00mm
  // instead of not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;
  emit_int8(byte2);

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;
  emit_int8(byte3);

  // P2: byte 4 as zL'Lbv'aaa
  // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
  int byte4 = (_attributes->is_no_reg_mask()) ?
              0 :
              _attributes->get_embedded_opmask_register_specifier();
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0: EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((_attributes->get_vector_len())& 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (_attributes->is_no_reg_mask() == false) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }
  emit_int8(byte4);
}

// Choose and emit a VEX or EVEX prefix for a memory-operand instruction,
// possibly downgrading to legacy VEX when EVEX resources are not needed.
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0;
  bool vex_b = adr.base_needs_rex();
  bool vex_x;
  if (adr.isxmmindex()) {
    vex_x = adr.xmmindex_needs_rex();
  } else {
    vex_x = adr.index_needs_rex();
  }
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // if vector length is turned off, revert to AVX for vectors smaller than 512-bit
  if (UseAVX > 2 && _legacy_mode_vl && attributes->uses_vl()) {
    switch (attributes->get_vector_len()) {
    case AVX_128bit:
    case AVX_256bit:
      attributes->set_is_legacy_mode();
      break;
    }
  }

  // For pure EVEX check and see if this instruction
  // is allowed in legacy mode and has resources which will
  // fit in it.  Pure EVEX instructions will use set_is_evex_instruction in their definition,
  // else that field is set when we encode to EVEX
  if (UseAVX > 2 && !attributes->is_legacy_mode() &&
      !_is_managed && !attributes->is_evex_instruction()) {
    if (!_legacy_mode_vl && attributes->get_vector_len() != AVX_512bit) {
      bool check_register_bank = NOT_IA32(true) IA32_ONLY(false);
      if (check_register_bank) {
        // check nds_enc and xreg_enc for upper bank usage
        if (nds_enc < 16 && xreg_enc < 16) {
          attributes->set_is_legacy_mode();
        }
      } else {
        attributes->set_is_legacy_mode();
      }
    }
  }

  _is_managed = false;
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v;
    // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
    if (adr.isxmmindex())  {
      evex_v = ((adr._xmmindex->encoding() > 15) ? true : false);
    } else {
      evex_v = (nds_enc >= 16);
    }
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }
}

// Register-register variant of the prefix chooser; returns the modrm byte
// components for the two register operands.
int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0;
  bool vex_b = ((src_enc & 8) == 8) ? 1 : 0;
  bool vex_x = false;
  set_attributes(attributes);
  attributes->set_current_assembler(this);
  bool check_register_bank = NOT_IA32(true) IA32_ONLY(false);

  // if vector length is turned off, revert to AVX for vectors smaller than 512-bit
  if (UseAVX > 2 && _legacy_mode_vl && attributes->uses_vl()) {
    switch (attributes->get_vector_len()) {
    case AVX_128bit:
    case AVX_256bit:
      if (check_register_bank) {
        if (dst_enc >= 16 || nds_enc >= 16 || src_enc >= 16) {
          // up propagate arithmetic instructions to meet RA requirements
          attributes->set_vector_len(AVX_512bit);
        } else {
          attributes->set_is_legacy_mode();
        }
      } else {
        attributes->set_is_legacy_mode();
      }
      break;
    }
  }

  // For pure EVEX check and see if this instruction
  // is allowed in legacy mode and has resources which will
  // fit in it.  Pure EVEX instructions will use set_is_evex_instruction in their definition,
  // else that field is set when we encode to EVEX
  if (UseAVX > 2 && !attributes->is_legacy_mode() &&
      !_is_managed && !attributes->is_evex_instruction()) {
    if (!_legacy_mode_vl && attributes->get_vector_len() != AVX_512bit) {
      if (check_register_bank) {
        // check dst_enc, nds_enc and src_enc for upper bank usage
        if (dst_enc < 16 && nds_enc < 16 && src_enc < 16) {
          attributes->set_is_legacy_mode();
        }
      } else {
        attributes->set_is_legacy_mode();
      }
    }
  }

  _is_managed = false;
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}


// Emit either a VEX prefix (AVX available) or a legacy SSE prefix sequence
// for a memory-operand SIMD instruction.
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            VexOpcode opc, InstructionAttr *attributes) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
  }
}

// Register-register variant of simd_prefix(); returns the modrm encode bits.
int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      VexOpcode opc, InstructionAttr *attributes) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
  }
}

// Packed-double compare; cop is the comparison predicate immediate.
void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  // This encoding is upto AVX2
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)(0xF & cop));
}

void Assembler::vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x4B);
  emit_int8((unsigned char)(0xC0 | encode));
  int src2_enc
              = src2->encoding();
  // src2 selector register goes in the high nibble of the imm8 operand.
  emit_int8((unsigned char)(0xF0 & src2_enc<<4));
}

// vpblendd: dword blend with immediate control byte, opcode 0x02.
void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x02);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// vcmpps: packed-single compare (legacy VEX form), opcode 0xC2.
void Assembler::vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evcmpps: EVEX packed-single compare writing a k-register mask result.
void Assembler::evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        ComparisonPredicateFP comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.0F.W0 C2 /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evcmppd: EVEX packed-double compare writing a k-register mask result.
void Assembler::evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        ComparisonPredicateFP comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F.W1 C2 /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// blendvps: SSE4.1 implicit-xmm0 blend, opcode 0x14 (SSE encoding only).
void Assembler::blendvps(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x14);
  emit_int8((unsigned char)(0xC0 | encode));
}

// blendvpd: SSE4.1 implicit-xmm0 blend, opcode 0x15 (SSE encoding only).
void Assembler::blendvpd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x15);
  emit_int8((unsigned char)(0xC0 | encode));
}

// pblendvb: SSE4.1 implicit-xmm0 byte blend, opcode 0x10 (SSE encoding only).
void Assembler::pblendvb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert(UseAVX <= 0, "sse encoding is inconsistent with avx encoding");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}

// vblendvps: AVX variable blend, opcode 0x4A; mask register in imm8 high nibble.
void Assembler::vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
  assert(UseAVX > 0 && (vector_len == AVX_128bit || vector_len == AVX_256bit), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x4A);
  emit_int8((unsigned char)(0xC0 | encode));
  int mask_enc = mask->encoding();
  emit_int8((unsigned char)(0xF0 & mask_enc<<4));
}

// vpcmpgtb: packed signed byte greater-than compare, opcode 0x64 (VEX only).
void Assembler::vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x64);
  emit_int8((unsigned char)(0xC0 | encode));
}

// vpcmpgtw: packed signed word greater-than compare, opcode 0x65 (VEX only).
void Assembler::vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x65);
  emit_int8((unsigned char)(0xC0 | encode));
}

// vpcmpgtd: packed signed dword greater-than compare, opcode 0x66 (VEX only).
void Assembler::vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x66);
  emit_int8((unsigned char)(0xC0 | encode));
}

// vpcmpgtq: packed signed qword greater-than compare, 0F38 map opcode 0x37.
void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : VM_Version::supports_avx2(), "");
  assert(vector_len <= AVX_256bit, "evex encoding is different - has k register as dest");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x37);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evpcmpd (reg-reg): EVEX dword compare under opmask, result in k-register.
void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x1F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evpcmpd (reg-mem): memory-operand form of the EVEX dword compare.
void Assembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W0 1F /r ib
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x1F);
  emit_operand(as_Register(dst_enc), src);
  emit_int8((unsigned char)comparison);
}

// evpcmpq (reg-reg): EVEX qword compare under opmask, result in k-register.
void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x1F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evpcmpq (reg-mem): memory-operand form of the EVEX qword compare.
void Assembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W1 1F /r ib
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x1F);
  emit_operand(as_Register(dst_enc), src);
  emit_int8((unsigned char)comparison);
}

// evpcmpb (reg-reg): EVEX byte compare; requires AVX512BW.
void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x3F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evpcmpb (reg-mem): memory-operand form; full-vector-mem (FVM) tuple.
void Assembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W0 3F /r ib
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x3F);
  emit_operand(as_Register(dst_enc), src);
  emit_int8((unsigned char)comparison);
}

// evpcmpw (reg-reg): EVEX word compare; requires AVX512BW.
void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x3F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)comparison);
}

// evpcmpw (reg-mem): memory-operand form; full-vector-mem (FVM) tuple.
void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
                        int comparison, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  assert(comparison >= Assembler::eq && comparison <= Assembler::_true, "");
  // Encoding: EVEX.NDS.XXX.66.0F3A.W1 3F /r ib
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.reset_is_clear_context();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x3F);
  emit_operand(as_Register(dst_enc), src);
  emit_int8((unsigned char)comparison);
}

// vpblendvb: AVX variable byte blend, opcode 0x4C; mask reg in imm8 high nibble.
void Assembler::vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x4C);
  emit_int8((unsigned char)(0xC0 | encode));
  int mask_enc = mask->encoding();
  emit_int8((unsigned char)(0xF0 & mask_enc << 4));
}

// evblendmpd: EVEX masked double blend; merge controls merging vs zeroing.
void Assembler::evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W1 65 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x65);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evblendmps: EVEX masked single blend; merge controls merging vs zeroing.
void Assembler::evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  // Encoding: EVEX.NDS.XXX.66.0F38.W0 65 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x65);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evpblendmb: EVEX masked byte blend; requires AVX512BW.
void Assembler::evpblendmb (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W0 66 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x66);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evpblendmw: EVEX masked word blend; requires AVX512BW.
void Assembler::evpblendmw (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_avx512bw(), "");
  // Encoding: EVEX.NDS.512.66.0F38.W1 66 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x66);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evpblendmd: EVEX masked dword blend.
void Assembler::evpblendmd (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  //Encoding: EVEX.NDS.512.66.0F38.W0 64 /r
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x64);
  emit_int8((unsigned char)(0xC0 | encode));
}

// evpblendmq: EVEX masked qword blend.
void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  //Encoding: EVEX.NDS.512.66.0F38.W1 64 /r
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x64);
  emit_int8((unsigned char)(0xC0 | encode));
}

// shlxl: BMI2 32-bit shift-left-without-flags; shift count in src2 (VEX.vvvv).
void Assembler::shlxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xC0 | encode));
}

// shlxq: BMI2 64-bit shift-left-without-flags; shift count in src2 (VEX.vvvv).
void Assembler::shlxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64
// 32-bit-only pieces of the assembler.

// incl: single-byte 0x40+reg encoding (only valid on 32-bit; on 64-bit
// 0x40-0x4F are REX prefixes).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

// mov_literal32 (mem form): C7 /0 with a relocated 32-bit immediate.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

// mov_literal32 (reg form): B8+reg with a relocated 32-bit immediate.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

// push_literal32: push a relocated 32-bit immediate (opcode 0x68).
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

// setnz: 0F 95 /r on the byte register of dst (no REX needed on 32-bit).
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

// shldl: 0F A5 /r, shift count implicitly in CL.
void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

// 0F A4 / r ib
void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA4);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
  emit_int8(imm8);
}

// shrdl: 0F AD /r, shift count implicitly in CL.
void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

// setnz: 64-bit form needs a REX prefix for the low byte of SPL/BPL/SIL/DIL
// and for r8-r15; prefix_and_encode handles that.
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.

// Returns true when the target of adr can be addressed rip-relatively
// (i.e. within a signed 32-bit displacement) from anywhere in the code cache.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

// Check if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}

// Emit a 64-bit data word; with relocInfo::none no relocation is recorded.
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

// Emit a relocated 64-bit data word; must be inside an InstructionMark so the
// relocation can anchor on the enclosing instruction.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

// Emit REX prefix (if needed) for a single register operand and return its
// low-3-bit encoding. byteinst forces a bare REX for SPL/BPL/SIL/DIL access.
int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

// 64-bit-operand form: always emits REX.W (plus REX.B for r8-r15).
int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

// Emit REX prefix (if needed) for a reg-reg pair and return the packed
// ModRM reg/rm fields.
int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// 64-bit-operand reg-reg form: always REX.W, plus R/B bits as needed.
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// Emit REX.B if the single register operand is r8-r15.
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

// Merge R/B extension bits into a caller-supplied prefix and emit it
// (nothing is emitted if the result is empty).
void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)( p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

// Register + memory form of the above; does not support an extended index
// register (asserts if one is present).
void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

// Emit REX prefix (if needed) for a memory operand alone (X/B bits).
void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

// 64-bit-operand form for a memory operand: always REX.W plus X/B as needed.
void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}


// Emit REX prefix (if needed) for a register + memory operand pair
// (R for the register, X/B for the address; byteinst as above).
void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

// 64-bit-operand register + memory pair (continues past this chunk).
void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if
(adr.index_needs_rex()) { 9404 prefix(REX_WX); 9405 } else { 9406 prefix(REX_W); 9407 } 9408 } 9409 } else { 9410 if (adr.base_needs_rex()) { 9411 if (adr.index_needs_rex()) { 9412 prefix(REX_WRXB); 9413 } else { 9414 prefix(REX_WRB); 9415 } 9416 } else { 9417 if (adr.index_needs_rex()) { 9418 prefix(REX_WRX); 9419 } else { 9420 prefix(REX_WR); 9421 } 9422 } 9423 } 9424 } 9425 9426 void Assembler::prefix(Address adr, XMMRegister reg) { 9427 if (reg->encoding() < 8) { 9428 if (adr.base_needs_rex()) { 9429 if (adr.index_needs_rex()) { 9430 prefix(REX_XB); 9431 } else { 9432 prefix(REX_B); 9433 } 9434 } else { 9435 if (adr.index_needs_rex()) { 9436 prefix(REX_X); 9437 } 9438 } 9439 } else { 9440 if (adr.base_needs_rex()) { 9441 if (adr.index_needs_rex()) { 9442 prefix(REX_RXB); 9443 } else { 9444 prefix(REX_RB); 9445 } 9446 } else { 9447 if (adr.index_needs_rex()) { 9448 prefix(REX_RX); 9449 } else { 9450 prefix(REX_R); 9451 } 9452 } 9453 } 9454 } 9455 9456 void Assembler::prefixq(Address adr, XMMRegister src) { 9457 if (src->encoding() < 8) { 9458 if (adr.base_needs_rex()) { 9459 if (adr.index_needs_rex()) { 9460 prefix(REX_WXB); 9461 } else { 9462 prefix(REX_WB); 9463 } 9464 } else { 9465 if (adr.index_needs_rex()) { 9466 prefix(REX_WX); 9467 } else { 9468 prefix(REX_W); 9469 } 9470 } 9471 } else { 9472 if (adr.base_needs_rex()) { 9473 if (adr.index_needs_rex()) { 9474 prefix(REX_WRXB); 9475 } else { 9476 prefix(REX_WRB); 9477 } 9478 } else { 9479 if (adr.index_needs_rex()) { 9480 prefix(REX_WRX); 9481 } else { 9482 prefix(REX_WR); 9483 } 9484 } 9485 } 9486 } 9487 9488 void Assembler::adcq(Register dst, int32_t imm32) { 9489 (void) prefixq_and_encode(dst->encoding()); 9490 emit_arith(0x81, 0xD0, dst, imm32); 9491 } 9492 9493 void Assembler::adcq(Register dst, Address src) { 9494 InstructionMark im(this); 9495 prefixq(src, dst); 9496 emit_int8(0x13); 9497 emit_operand(dst, src); 9498 } 9499 9500 void Assembler::adcq(Register dst, Register src) { 9501 (void) 
prefixq_and_encode(dst->encoding(), src->encoding()); 9502 emit_arith(0x13, 0xC0, dst, src); 9503 } 9504 9505 void Assembler::addq(Address dst, int32_t imm32) { 9506 InstructionMark im(this); 9507 prefixq(dst); 9508 emit_arith_operand(0x81, rax, dst,imm32); 9509 } 9510 9511 void Assembler::addq(Address dst, Register src) { 9512 InstructionMark im(this); 9513 prefixq(dst, src); 9514 emit_int8(0x01); 9515 emit_operand(src, dst); 9516 } 9517 9518 void Assembler::addq(Register dst, int32_t imm32) { 9519 (void) prefixq_and_encode(dst->encoding()); 9520 emit_arith(0x81, 0xC0, dst, imm32); 9521 } 9522 9523 void Assembler::addq(Register dst, Address src) { 9524 InstructionMark im(this); 9525 prefixq(src, dst); 9526 emit_int8(0x03); 9527 emit_operand(dst, src); 9528 } 9529 9530 void Assembler::addq(Register dst, Register src) { 9531 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 9532 emit_arith(0x03, 0xC0, dst, src); 9533 } 9534 9535 void Assembler::adcxq(Register dst, Register src) { 9536 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 9537 emit_int8((unsigned char)0x66); 9538 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 9539 emit_int8(0x0F); 9540 emit_int8(0x38); 9541 emit_int8((unsigned char)0xF6); 9542 emit_int8((unsigned char)(0xC0 | encode)); 9543 } 9544 9545 void Assembler::adoxq(Register dst, Register src) { 9546 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 9547 emit_int8((unsigned char)0xF3); 9548 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 9549 emit_int8(0x0F); 9550 emit_int8(0x38); 9551 emit_int8((unsigned char)0xF6); 9552 emit_int8((unsigned char)(0xC0 | encode)); 9553 } 9554 9555 void Assembler::andq(Address dst, int32_t imm32) { 9556 InstructionMark im(this); 9557 prefixq(dst); 9558 emit_int8((unsigned char)0x81); 9559 emit_operand(rsp, dst, 4); 9560 emit_int32(imm32); 9561 } 9562 9563 void Assembler::andq(Register dst, int32_t imm32) { 9564 (void) 
prefixq_and_encode(dst->encoding()); 9565 emit_arith(0x81, 0xE0, dst, imm32); 9566 } 9567 9568 void Assembler::andq(Register dst, Address src) { 9569 InstructionMark im(this); 9570 prefixq(src, dst); 9571 emit_int8(0x23); 9572 emit_operand(dst, src); 9573 } 9574 9575 void Assembler::andq(Register dst, Register src) { 9576 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 9577 emit_arith(0x23, 0xC0, dst, src); 9578 } 9579 9580 void Assembler::andnq(Register dst, Register src1, Register src2) { 9581 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9582 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9583 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9584 emit_int8((unsigned char)0xF2); 9585 emit_int8((unsigned char)(0xC0 | encode)); 9586 } 9587 9588 void Assembler::andnq(Register dst, Register src1, Address src2) { 9589 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9590 InstructionMark im(this); 9591 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9592 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9593 emit_int8((unsigned char)0xF2); 9594 emit_operand(dst, src2); 9595 } 9596 9597 void Assembler::bsfq(Register dst, Register src) { 9598 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 9599 emit_int8(0x0F); 9600 emit_int8((unsigned char)0xBC); 9601 emit_int8((unsigned char)(0xC0 | encode)); 9602 } 9603 9604 void Assembler::bsrq(Register dst, Register src) { 9605 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 9606 emit_int8(0x0F); 9607 emit_int8((unsigned char)0xBD); 9608 emit_int8((unsigned char)(0xC0 | encode)); 9609 } 9610 9611 void 
Assembler::bswapq(Register reg) { 9612 int encode = prefixq_and_encode(reg->encoding()); 9613 emit_int8(0x0F); 9614 emit_int8((unsigned char)(0xC8 | encode)); 9615 } 9616 9617 void Assembler::blsiq(Register dst, Register src) { 9618 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9619 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9620 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9621 emit_int8((unsigned char)0xF3); 9622 emit_int8((unsigned char)(0xC0 | encode)); 9623 } 9624 9625 void Assembler::blsiq(Register dst, Address src) { 9626 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9627 InstructionMark im(this); 9628 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9629 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9630 emit_int8((unsigned char)0xF3); 9631 emit_operand(rbx, src); 9632 } 9633 9634 void Assembler::blsmskq(Register dst, Register src) { 9635 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9636 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9637 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9638 emit_int8((unsigned char)0xF3); 9639 emit_int8((unsigned char)(0xC0 | encode)); 9640 } 9641 9642 void Assembler::blsmskq(Register dst, Address src) { 9643 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9644 InstructionMark im(this); 9645 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 
9646 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9647 emit_int8((unsigned char)0xF3); 9648 emit_operand(rdx, src); 9649 } 9650 9651 void Assembler::blsrq(Register dst, Register src) { 9652 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9653 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9654 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9655 emit_int8((unsigned char)0xF3); 9656 emit_int8((unsigned char)(0xC0 | encode)); 9657 } 9658 9659 void Assembler::blsrq(Register dst, Address src) { 9660 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 9661 InstructionMark im(this); 9662 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 9663 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 9664 emit_int8((unsigned char)0xF3); 9665 emit_operand(rcx, src); 9666 } 9667 9668 void Assembler::cdqq() { 9669 prefix(REX_W); 9670 emit_int8((unsigned char)0x99); 9671 } 9672 9673 void Assembler::clflush(Address adr) { 9674 prefix(adr); 9675 emit_int8(0x0F); 9676 emit_int8((unsigned char)0xAE); 9677 emit_operand(rdi, adr); 9678 } 9679 9680 void Assembler::cmovq(Condition cc, Register dst, Register src) { 9681 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 9682 emit_int8(0x0F); 9683 emit_int8(0x40 | cc); 9684 emit_int8((unsigned char)(0xC0 | encode)); 9685 } 9686 9687 void Assembler::cmovq(Condition cc, Register dst, Address src) { 9688 InstructionMark im(this); 9689 prefixq(src, dst); 9690 emit_int8(0x0F); 9691 emit_int8(0x40 | cc); 9692 emit_operand(dst, src); 9693 } 9694 9695 void Assembler::cmpq(Address dst, int32_t imm32) { 9696 InstructionMark im(this); 
9697 prefixq(dst); 9698 emit_int8((unsigned char)0x81); 9699 emit_operand(rdi, dst, 4); 9700 emit_int32(imm32); 9701 } 9702 9703 void Assembler::cmpq(Register dst, int32_t imm32) { 9704 (void) prefixq_and_encode(dst->encoding()); 9705 emit_arith(0x81, 0xF8, dst, imm32); 9706 } 9707 9708 void Assembler::cmpq(Address dst, Register src) { 9709 InstructionMark im(this); 9710 prefixq(dst, src); 9711 emit_int8(0x3B); 9712 emit_operand(src, dst); 9713 } 9714 9715 void Assembler::cmpq(Register dst, Register src) { 9716 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 9717 emit_arith(0x3B, 0xC0, dst, src); 9718 } 9719 9720 void Assembler::cmpq(Register dst, Address src) { 9721 InstructionMark im(this); 9722 prefixq(src, dst); 9723 emit_int8(0x3B); 9724 emit_operand(dst, src); 9725 } 9726 9727 void Assembler::cmpxchgq(Register reg, Address adr) { 9728 InstructionMark im(this); 9729 prefixq(adr, reg); 9730 emit_int8(0x0F); 9731 emit_int8((unsigned char)0xB1); 9732 emit_operand(reg, adr); 9733 } 9734 9735 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { 9736 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 9737 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 9738 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 9739 emit_int8(0x2A); 9740 emit_int8((unsigned char)(0xC0 | encode)); 9741 } 9742 9743 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { 9744 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 9745 InstructionMark im(this); 9746 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 9747 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 9748 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 9749 emit_int8(0x2A); 9750 emit_operand(dst, src); 9751 } 

// CVTSI2SS xmm, m64 (F3 REX.W 0F 2A /r): convert 64-bit int in memory to float.
void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTTSD2SI r64, xmm (F2 REX.W 0F 2C /r): truncating double-to-int64.
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTTSS2SI r64, xmm (F3 REX.W 0F 2C /r): truncating float-to-int64.
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// DEC r32 via FF /1.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

// DEC r64 via REX.W FF /1.
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8(0xC8 | encode);
}

// DEC m64 (FF /1 -- rcx encodes the /1 extension).
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

// FXRSTOR64 (REX.W 0F AE /1): restore x87/SSE state.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

// XRSTOR64 (REX.W 0F AE /5): restore extended processor state.
void Assembler::xrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(5), src);
}

// FXSAVE64 (REX.W 0F AE /0): save x87/SSE state.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

// XSAVE64 (REX.W 0F AE /4): save extended processor state.
void Assembler::xsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(4), dst);
}

// IDIV r64 (REX.W F7 /7): signed divide rdx:rax by src.
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

// IMUL r64, r64 (0F AF /r).
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// IMUL r64, r64, imm: short form 6B /r ib when imm fits a byte, else 69 /r id.
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

// IMUL r64, m64 (0F AF /r).
void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

// INC r32 via FF /0.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// INC r64 via REX.W FF /0.
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// INC m64 (FF /0 -- rax encodes the /0 extension).
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

// On 64-bit, plain lea is the 64-bit form.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

// LEA r64, m (REX.W 8D /r).
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

// MOV r64, imm64 (REX.W B8+rd io): full 64-bit immediate load.
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

// As mov64, but the immediate carries relocation info (e.g. oop/metadata).
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}

// MOV r32, imm32 (B8+rd) carrying a narrow-oop relocation.
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// MOV m32, imm32 (C7 /0) carrying a narrow-oop relocation.
void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// CMP r/m32, imm32 (81 /7) carrying a narrow-oop relocation.
void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// CMP m32, imm32 (81 /7) carrying a narrow-oop relocation.
void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// LZCNT r64, r64 (F3 REX.W 0F BD /r).  On CPUs without LZCNT the bytes
// decode as BSR -- hence the assert message.
void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVQ xmm, r64 (66 REX.W 0F 6E /r): GPR to XMM.
void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVQ r64, xmm (66 REX.W 0F 7E /r): XMM to GPR.
void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV r64, r64 (8B /r).
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV r64, m64 (8B /r).
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

// MOV m64, r64 (89 /r).
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// MOVSX r64, m8 (0F BE /r): sign-extend byte.
void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

// MOVSX r64, r8 (0F BE /r).
void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Intentionally disabled: the emitted encoding was observed to be wrong
// (see the dbx disassembly below), so this guards against accidental use.
void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

// MOV m64, imm32 (REX.W C7 /0): store sign-extended 32-bit immediate.
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

// MOVSXD r64, m32 (REX.W 63 /r).
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

// MOVSXD r64, r32 (REX.W 63 /r).
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSX r64, m16 (0F BF /r): sign-extend word.
void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

// MOVSX r64, r16 (0F BF /r).
void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVZX r64, m8 (0F B6 /r): zero-extend byte.
void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

// MOVZX r64, r8 (0F B6 /r).
void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

// MOVZX r64, m16 (0F B7 /r): zero-extend word.
void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

// MOVZX r64, r16 (0F B7 /r).
void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MUL m64 (F7 /4 -- rsp encodes the /4 extension): rdx:rax = rax * [src].
void Assembler::mulq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

// MUL r64 (F7 /4): rdx:rax = rax * src.
void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

// MULX r64a, r64b, r/m64 (VEX.LZ.F2.0F38.W1 F6 /r): flagless widening
// multiply of rdx by src into dst1:dst2.
void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

// NEG r64 (F7 /3).
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

// NOT r64 (F7 /2).
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

// OR m64, imm32 (81 /1 -- rcx encodes the /1 extension).
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

// OR r/m64, imm32 (81 /1).
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

// OR r64, m64 (0B /r).
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

// OR r64, r64 (0B /r).
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

// Restore all GPRs saved by pusha() below (no POPA in 64-bit mode, so this
// reloads from the 16-word frame and pops it).  rsp's slot is skipped.
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

// POPCNT r64, m64 (F3 REX.W 0F B8 /r).  F3 must precede the REX prefix.
void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

// POPCNT r64, r64 (F3 REX.W 0F B8 /r).
void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// POP m64 (8F /0 -- rax encodes the /0 extension).
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}

// Save all GPRs in a 16-word frame (no PUSHA in 64-bit mode).  The original
// rsp is stored first, into what becomes slot 11 after the subq.
void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// PUSH m64 (FF /6 -- rsi encodes the /6 extension).
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

// RCL r64, imm8 (/2): D1 short form for shift-by-1, else C1 ib.
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

// RCR r64, imm8 (/3): D1 short form for shift-by-1, else C1 ib.
void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD8 | encode));
    emit_int8(imm8);
  }
}

// ROR r64, imm8 (/1): D1 short form for shift-by-1, else C1 ib.
void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xc8 | encode));
    emit_int8(imm8);
  }
}

// RORX r64, r64, imm8 (VEX.LZ.F2.0F3A.W1 F0 /r ib): flagless rotate.
void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// RORX r32, r32, imm8 (same as above with VEX.W0).
void Assembler::rorxd(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// SAR r64, imm8 (/7): D1 short form for shift-by-1, else C1 ib.
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// SAR r64, cl (D3 /7).
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

// SBB m64, imm32 (81 /3, or imm8 short form via emit_arith_operand).
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r/m64, imm32 (81 /3).
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r64, m64 (1B /r).
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// SBB r64, r64 (1B /r).
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

// SHL r64, imm8 (/4): D1 short form for shift-by-1, else C1 ib.
void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

// SHL r64, cl (D3 /4).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

// SHR r64, imm8 (C1 /5 ib).
// NOTE(review): unlike the other shift/rotate-by-immediate emitters above,
// this one never uses the one-byte-shorter D1 form for imm8 == 1; the
// encoding emitted is still correct, just one byte longer.
void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

// SHR r64, cl (D3 /5).
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8(0xE8 | encode);
}

void
Assembler::subq(Address dst, int32_t imm32) {
  // sub qword ptr [dst], imm32: 81 /5 (emit_arith_operand may shorten to
  // 83 /5 ib for a sign-extendable immediate).
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);   // rbp encodes the /5 digit
}

// sub qword ptr [dst], src: 29 /r.
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

// sub dst, imm32: 81 /5 register form.
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit.
// Used where the instruction length must be predictable (e.g. code that
// is patched later).
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

// sub dst, qword ptr [src]: 2B /r.
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

// sub dst, src (register-register form of 2B).
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// test dst, imm32.  Uses the dedicated rax short form (REX.W A9 id) when
// dst is rax (encoding 0), otherwise the general F7 /0 id form.
void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

// test dst, src: 85 /r.
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// test dst, qword ptr [src]: 85 /r memory form.
void Assembler::testq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}

void
Assembler::xaddq(Address dst, Register src) {
  // xadd qword ptr [dst], src: 0F C1 /r (callers add any LOCK prefix
  // themselves before this instruction).
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

// xchg dst, qword ptr [src]: 87 /r memory form.
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// xchg dst, src: 87 /r register form.
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xc0 | encode));
}

// xor dst, src: 33 /r.
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// xor dst, qword ptr [src]: 33 /r memory form.
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64