/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX enabled platforms.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64



// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
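// For example (illustrative values, assuming the usual IA-32 register
// numbering): make_raw(/* base */ 2 /* rdx */, /* index */ 4 /* rsp */,
// /* scale */ 0, /* disp */ 8, relocInfo::none) produces the operand
// [rdx + 8] with no index, because a raw index of 4 (rsp) means "no index".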
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}


bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}


bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && (_attributes != NULL) && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}


void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  if (UseAVX > 2) {
    int xreg_enc = reg->encoding();
    if (xreg_enc > 15) {
      XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
      emit_operand((Register)new_reg, base, index, scale, disp, rspec);
      return;
    }
  }
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
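  // For example, "case REP4(0x00):" expands to
  //   case 0x00: case 0x01: case 0x02: case 0x03: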
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
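      // fall through: pshufd also carries a trailing imm8, picked up below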
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1; // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand) return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for the PINSRW and PEXTRW instructions,
    // but those have the 0x0F prefix and are processed when 0x0F is handled above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // a ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
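    // For example (illustrative): in the 2-byte prefix C5 F8, the second
    // byte 0xF8 = 11111000b has bits [7:6] set, so in 32-bit mode it cannot
    // be mistaken for the memory-form ModRM byte of an LDS/LES instruction.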
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1; // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert((UseAVX > 0), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand) return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07; // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg 100][ss index base]
    // [00 reg 100][00 100 esp]
    // [00 reg base]
    // [00 reg 100][ss index 101][disp32]
    // [00 reg 101]              [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip; // caller wants the disp32
      ip += 4;     // skip the disp32
    }
    break;

  case 1:
    // [01 reg 100][ss index base][disp8]
    // [01 reg 100][00 100 esp][disp8]
    // [01 reg base]            [disp8]
    ip += 1; // skip the disp8
    break;

  case 2:
    // [10 reg 100][ss index base][disp32]
    // [10 reg 100][00 100 esp][disp32]
    // [10 reg base]            [disp32]
    if (which == disp32_operand)
      return ip; // caller wants the disp32
    ip += 4;     // skip the disp32
    break;

  case 3:
    // [11 reg base] (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.
1499 1500 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand); 1501 emit_data((int) disp, rspec, operand); 1502 } 1503 1504 void Assembler::cdql() { 1505 emit_int8((unsigned char)0x99); 1506 } 1507 1508 void Assembler::cld() { 1509 emit_int8((unsigned char)0xFC); 1510 } 1511 1512 void Assembler::cmovl(Condition cc, Register dst, Register src) { 1513 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1514 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1515 emit_int8(0x0F); 1516 emit_int8(0x40 | cc); 1517 emit_int8((unsigned char)(0xC0 | encode)); 1518 } 1519 1520 1521 void Assembler::cmovl(Condition cc, Register dst, Address src) { 1522 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1523 prefix(src, dst); 1524 emit_int8(0x0F); 1525 emit_int8(0x40 | cc); 1526 emit_operand(dst, src); 1527 } 1528 1529 void Assembler::cmpb(Address dst, int imm8) { 1530 InstructionMark im(this); 1531 prefix(dst); 1532 emit_int8((unsigned char)0x80); 1533 emit_operand(rdi, dst, 1); 1534 emit_int8(imm8); 1535 } 1536 1537 void Assembler::cmpl(Address dst, int32_t imm32) { 1538 InstructionMark im(this); 1539 prefix(dst); 1540 emit_int8((unsigned char)0x81); 1541 emit_operand(rdi, dst, 4); 1542 emit_int32(imm32); 1543 } 1544 1545 void Assembler::cmpl(Register dst, int32_t imm32) { 1546 prefix(dst); 1547 emit_arith(0x81, 0xF8, dst, imm32); 1548 } 1549 1550 void Assembler::cmpl(Register dst, Register src) { 1551 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1552 emit_arith(0x3B, 0xC0, dst, src); 1553 } 1554 1555 void Assembler::cmpl(Register dst, Address src) { 1556 InstructionMark im(this); 1557 prefix(src, dst); 1558 emit_int8((unsigned char)0x3B); 1559 emit_operand(dst, src); 1560 } 1561 1562 void Assembler::cmpw(Address dst, int imm16) { 1563 InstructionMark im(this); 1564 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); 1565 emit_int8(0x66); 1566 emit_int8((unsigned char)0x81); 1567 emit_operand(rdi, dst, 2); 1568 emit_int16(imm16); 1569 } 1570 1571 // The 32-bit cmpxchg compares the value at adr with the contents of rax, 1572 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1573 // The ZF is set if the compared values were equal, and cleared otherwise. 1574 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg 1575 InstructionMark im(this); 1576 prefix(adr, reg); 1577 emit_int8(0x0F); 1578 emit_int8((unsigned char)0xB1); 1579 emit_operand(reg, adr); 1580 } 1581 1582 // The 8-bit cmpxchg compares the value at adr with the contents of rax, 1583 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1584 // The ZF is set if the compared values were equal, and cleared otherwise. 1585 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg 1586 InstructionMark im(this); 1587 prefix(adr, reg, true); 1588 emit_int8(0x0F); 1589 emit_int8((unsigned char)0xB0); 1590 emit_operand(reg, adr); 1591 } 1592 1593 void Assembler::comisd(XMMRegister dst, Address src) { 1594 // NOTE: dbx seems to decode this as comiss even though the 1595 // 0x66 is there. 
Strangly ucomisd comes out correct 1596 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1597 InstructionMark im(this); 1598 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);; 1599 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1600 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1601 emit_int8(0x2F); 1602 emit_operand(dst, src); 1603 } 1604 1605 void Assembler::comisd(XMMRegister dst, XMMRegister src) { 1606 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1607 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1608 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 1609 emit_int8(0x2F); 1610 emit_int8((unsigned char)(0xC0 | encode)); 1611 } 1612 1613 void Assembler::comiss(XMMRegister dst, Address src) { 1614 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1615 InstructionMark im(this); 1616 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1617 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1618 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1619 emit_int8(0x2F); 1620 emit_operand(dst, src); 1621 } 1622 1623 void Assembler::comiss(XMMRegister dst, XMMRegister src) { 1624 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1625 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1626 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1627 emit_int8(0x2F); 1628 emit_int8((unsigned char)(0xC0 | encode)); 1629 } 1630 1631 void Assembler::cpuid() { 1632 emit_int8(0x0F); 1633 emit_int8((unsigned char)0xA2); 1634 } 1635 1636 // Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented 1637 // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v 1638 // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. - 1639 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8. - 1640 // 1641 // F2 0F 38 F1 / r CRC32 r32, r / m16 RM Valid Valid Accumulate CRC32 on r / m16. v 1642 // 1643 // F2 0F 38 F1 / r CRC32 r32, r / m32 RM Valid Valid Accumulate CRC32 on r / m32. v 1644 // 1645 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v 1646 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) { 1647 assert(VM_Version::supports_sse4_2(), ""); 1648 int8_t w = 0x01; 1649 Prefix p = Prefix_EMPTY; 1650 1651 emit_int8((int8_t)0xF2); 1652 switch (sizeInBytes) { 1653 case 1: 1654 w = 0; 1655 break; 1656 case 2: 1657 case 4: 1658 break; 1659 LP64_ONLY(case 8:) 1660 // This instruction is not valid in 32 bits 1661 // Note: 1662 // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf 1663 // 1664 // Page B - 72 Vol. 2C says 1665 // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2 1666 // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m 1667 // F0!!! 1668 // while 3 - 208 Vol. 
2A 1669 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64. 1670 // 1671 // the 0 on a last bit is reserved for a different flavor of this instruction : 1672 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8. 1673 p = REX_W; 1674 break; 1675 default: 1676 assert(0, "Unsupported value for a sizeInBytes argument"); 1677 break; 1678 } 1679 LP64_ONLY(prefix(crc, v, p);) 1680 emit_int8((int8_t)0x0F); 1681 emit_int8(0x38); 1682 emit_int8((int8_t)(0xF0 | w)); 1683 emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7)); 1684 } 1685 1686 void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) { 1687 assert(VM_Version::supports_sse4_2(), ""); 1688 InstructionMark im(this); 1689 int8_t w = 0x01; 1690 Prefix p = Prefix_EMPTY; 1691 1692 emit_int8((int8_t)0xF2); 1693 switch (sizeInBytes) { 1694 case 1: 1695 w = 0; 1696 break; 1697 case 2: 1698 case 4: 1699 break; 1700 LP64_ONLY(case 8:) 1701 // This instruction is not valid in 32 bits 1702 p = REX_W; 1703 break; 1704 default: 1705 assert(0, "Unsupported value for a sizeInBytes argument"); 1706 break; 1707 } 1708 LP64_ONLY(prefix(crc, adr, p);) 1709 emit_int8((int8_t)0x0F); 1710 emit_int8(0x38); 1711 emit_int8((int8_t)(0xF0 | w)); 1712 emit_operand(crc, adr); 1713 } 1714 1715 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { 1716 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1717 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 1718 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1719 emit_int8((unsigned char)0xE6); 1720 emit_int8((unsigned char)(0xC0 | encode)); 1721 } 1722 1723 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { 1724 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1725 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 1726 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1727 emit_int8(0x5B); 1728 emit_int8((unsigned char)(0xC0 | encode)); 1729 } 1730 1731 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { 1732 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1733 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1734 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1735 emit_int8(0x5A); 1736 emit_int8((unsigned char)(0xC0 | encode)); 1737 } 1738 1739 void Assembler::cvtsd2ss(XMMRegister dst, Address src) { 1740 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1741 InstructionMark im(this); 1742 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1743 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1744 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1745 emit_int8(0x5A); 1746 emit_operand(dst, src); 1747 } 1748 1749 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { 1750 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1751 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1752 int encode = simd_prefix_and_encode(dst, dst, 
as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1753 emit_int8(0x2A); 1754 emit_int8((unsigned char)(0xC0 | encode)); 1755 } 1756 1757 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { 1758 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1759 InstructionMark im(this); 1760 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1761 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1762 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1763 emit_int8(0x2A); 1764 emit_operand(dst, src); 1765 } 1766 1767 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { 1768 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1769 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1770 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1771 emit_int8(0x2A); 1772 emit_int8((unsigned char)(0xC0 | encode)); 1773 } 1774 1775 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { 1776 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1777 InstructionMark im(this); 1778 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1779 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1780 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1781 emit_int8(0x2A); 1782 emit_operand(dst, src); 1783 } 1784 1785 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 1786 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1787 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1788 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1789 emit_int8(0x2A); 1790 emit_int8((unsigned char)(0xC0 | encode)); 1791 } 1792 1793 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { 1794 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1795 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1796 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1797 emit_int8(0x5A); 1798 emit_int8((unsigned char)(0xC0 | encode)); 1799 } 1800 1801 void Assembler::cvtss2sd(XMMRegister dst, Address src) { 1802 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1803 InstructionMark im(this); 1804 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1805 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1806 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1807 emit_int8(0x5A); 1808 emit_operand(dst, src); 1809 } 1810 1811 1812 void Assembler::cvttsd2sil(Register dst, XMMRegister src) { 1813 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1814 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1815 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1816 emit_int8(0x2C); 1817 emit_int8((unsigned char)(0xC0 | encode)); 1818 } 
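// Encoding note (illustrative, summarizing the converts above and below): opcode 0x2C used by
// cvttsd2sil / cvttss2sil is the truncating convert (cvttsd2si / cvttss2si), which rounds toward
// zero as Java's narrowing casts of float/double require; the forms that round under MXCSR control
// (cvtsd2si / cvtss2si) would use opcode 0x2D instead. For example, on a pre-AVX target
// cvttsd2sil(rax, xmm0) emits F2 0F 2C C0, i.e. cvttsd2si eax, xmm0.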
1819 1820 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 1821 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1822 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1823 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1824 emit_int8(0x2C); 1825 emit_int8((unsigned char)(0xC0 | encode)); 1826 } 1827 1828 void Assembler::decl(Address dst) { 1829 // Don't use it directly. Use MacroAssembler::decrement() instead. 1830 InstructionMark im(this); 1831 prefix(dst); 1832 emit_int8((unsigned char)0xFF); 1833 emit_operand(rcx, dst); 1834 } 1835 1836 void Assembler::divsd(XMMRegister dst, Address src) { 1837 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1838 InstructionMark im(this); 1839 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1840 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 1841 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1842 emit_int8(0x5E); 1843 emit_operand(dst, src); 1844 } 1845 1846 void Assembler::divsd(XMMRegister dst, XMMRegister src) { 1847 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1848 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1849 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1850 emit_int8(0x5E); 1851 emit_int8((unsigned char)(0xC0 | encode)); 1852 } 1853 1854 void Assembler::divss(XMMRegister dst, Address src) { 1855 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1856 InstructionMark im(this); 1857 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1858 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 1859 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1860 emit_int8(0x5E); 1861 emit_operand(dst, src); 1862 } 1863 1864 void Assembler::divss(XMMRegister dst, XMMRegister src) { 1865 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1866 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 1867 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1868 emit_int8(0x5E); 1869 emit_int8((unsigned char)(0xC0 | encode)); 1870 } 1871 1872 void Assembler::emms() { 1873 NOT_LP64(assert(VM_Version::supports_mmx(), "")); 1874 emit_int8(0x0F); 1875 emit_int8(0x77); 1876 } 1877 1878 void Assembler::hlt() { 1879 emit_int8((unsigned char)0xF4); 1880 } 1881 1882 void Assembler::idivl(Register src) { 1883 int encode = prefix_and_encode(src->encoding()); 1884 emit_int8((unsigned char)0xF7); 1885 emit_int8((unsigned char)(0xF8 | encode)); 1886 } 1887 1888 void Assembler::divl(Register src) { // Unsigned 1889 int encode = prefix_and_encode(src->encoding()); 1890 emit_int8((unsigned char)0xF7); 1891 emit_int8((unsigned char)(0xF0 | encode)); 1892 } 1893 1894 void Assembler::imull(Register src) { 1895 int encode = prefix_and_encode(src->encoding()); 1896 emit_int8((unsigned char)0xF7); 1897 emit_int8((unsigned char)(0xE8 | encode)); 1898 } 1899 1900 void Assembler::imull(Register dst, Register src) { 1901 int encode = 
prefix_and_encode(dst->encoding(), src->encoding()); 1902 emit_int8(0x0F); 1903 emit_int8((unsigned char)0xAF); 1904 emit_int8((unsigned char)(0xC0 | encode)); 1905 } 1906 1907 1908 void Assembler::imull(Register dst, Register src, int value) { 1909 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1910 if (is8bit(value)) { 1911 emit_int8(0x6B); 1912 emit_int8((unsigned char)(0xC0 | encode)); 1913 emit_int8(value & 0xFF); 1914 } else { 1915 emit_int8(0x69); 1916 emit_int8((unsigned char)(0xC0 | encode)); 1917 emit_int32(value); 1918 } 1919 } 1920 1921 void Assembler::imull(Register dst, Address src) { 1922 InstructionMark im(this); 1923 prefix(src, dst); 1924 emit_int8(0x0F); 1925 emit_int8((unsigned char) 0xAF); 1926 emit_operand(dst, src); 1927 } 1928 1929 1930 void Assembler::incl(Address dst) { 1931 // Don't use it directly. Use MacroAssembler::increment() instead. 1932 InstructionMark im(this); 1933 prefix(dst); 1934 emit_int8((unsigned char)0xFF); 1935 emit_operand(rax, dst); 1936 } 1937 1938 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) { 1939 InstructionMark im(this); 1940 assert((0 <= cc) && (cc < 16), "illegal cc"); 1941 if (L.is_bound()) { 1942 address dst = target(L); 1943 assert(dst != NULL, "jcc most probably wrong"); 1944 1945 const int short_size = 2; 1946 const int long_size = 6; 1947 intptr_t offs = (intptr_t)dst - (intptr_t)pc(); 1948 if (maybe_short && is8bit(offs - short_size)) { 1949 // 0111 tttn #8-bit disp 1950 emit_int8(0x70 | cc); 1951 emit_int8((offs - short_size) & 0xFF); 1952 } else { 1953 // 0000 1111 1000 tttn #32-bit disp 1954 assert(is_simm32(offs - long_size), 1955 "must be 32bit offset (call4)"); 1956 emit_int8(0x0F); 1957 emit_int8((unsigned char)(0x80 | cc)); 1958 emit_int32(offs - long_size); 1959 } 1960 } else { 1961 // Note: we could eliminate cond. jumps to this jump if the condition 1962 // is the same; however, that seems to be a rather unlikely case. 1963 // Note: use jccb() if the label to be bound is very close, to get 1964 // an 8-bit displacement 1965 L.add_patch_at(code(), locator()); 1966 emit_int8(0x0F); 1967 emit_int8((unsigned char)(0x80 | cc)); 1968 emit_int32(0); 1969 } 1970 } 1971 1972 void Assembler::jccb(Condition cc, Label& L) { 1973 if (L.is_bound()) { 1974 const int short_size = 2; 1975 address entry = target(L); 1976 #ifdef ASSERT 1977 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 1978 intptr_t delta = short_branch_delta(); 1979 if (delta != 0) { 1980 dist += (dist < 0 ?
(-delta) : delta); 1981 } 1982 assert(is8bit(dist), "Displacement too large for a short jmp"); 1983 #endif 1984 intptr_t offs = (intptr_t)entry - (intptr_t)pc(); 1985 // 0111 tttn #8-bit disp 1986 emit_int8(0x70 | cc); 1987 emit_int8((offs - short_size) & 0xFF); 1988 } else { 1989 InstructionMark im(this); 1990 L.add_patch_at(code(), locator()); 1991 emit_int8(0x70 | cc); 1992 emit_int8(0); 1993 } 1994 } 1995 1996 void Assembler::jmp(Address adr) { 1997 InstructionMark im(this); 1998 prefix(adr); 1999 emit_int8((unsigned char)0xFF); 2000 emit_operand(rsp, adr); 2001 } 2002 2003 void Assembler::jmp(Label& L, bool maybe_short) { 2004 if (L.is_bound()) { 2005 address entry = target(L); 2006 assert(entry != NULL, "jmp most probably wrong"); 2007 InstructionMark im(this); 2008 const int short_size = 2; 2009 const int long_size = 5; 2010 intptr_t offs = entry - pc(); 2011 if (maybe_short && is8bit(offs - short_size)) { 2012 emit_int8((unsigned char)0xEB); 2013 emit_int8((offs - short_size) & 0xFF); 2014 } else { 2015 emit_int8((unsigned char)0xE9); 2016 emit_int32(offs - long_size); 2017 } 2018 } else { 2019 // By default, forward jumps are always 32-bit displacements, since 2020 // we can't yet know where the label will be bound. If you're sure that 2021 // the forward jump will not run beyond 256 bytes, use jmpb to 2022 // force an 8-bit displacement. 2023 InstructionMark im(this); 2024 L.add_patch_at(code(), locator()); 2025 emit_int8((unsigned char)0xE9); 2026 emit_int32(0); 2027 } 2028 } 2029 2030 void Assembler::jmp(Register entry) { 2031 int encode = prefix_and_encode(entry->encoding()); 2032 emit_int8((unsigned char)0xFF); 2033 emit_int8((unsigned char)(0xE0 | encode)); 2034 } 2035 2036 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { 2037 InstructionMark im(this); 2038 emit_int8((unsigned char)0xE9); 2039 assert(dest != NULL, "must have a target"); 2040 intptr_t disp = dest - (pc() + sizeof(int32_t)); 2041 assert(is_simm32(disp), "must be 32bit offset (jmp)"); 2042 emit_data(disp, rspec.reloc(), call32_operand); 2043 } 2044 2045 void Assembler::jmpb(Label& L) { 2046 if (L.is_bound()) { 2047 const int short_size = 2; 2048 address entry = target(L); 2049 assert(entry != NULL, "jmp most probably wrong"); 2050 #ifdef ASSERT 2051 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 2052 intptr_t delta = short_branch_delta(); 2053 if (delta != 0) { 2054 dist += (dist < 0 ?
(-delta) : delta); 2055 } 2056 assert(is8bit(dist), "Displacement too large for a short jmp"); 2057 #endif 2058 intptr_t offs = entry - pc(); 2059 emit_int8((unsigned char)0xEB); 2060 emit_int8((offs - short_size) & 0xFF); 2061 } else { 2062 InstructionMark im(this); 2063 L.add_patch_at(code(), locator()); 2064 emit_int8((unsigned char)0xEB); 2065 emit_int8(0); 2066 } 2067 } 2068 2069 void Assembler::ldmxcsr( Address src) { 2070 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2071 InstructionMark im(this); 2072 prefix(src); 2073 emit_int8(0x0F); 2074 emit_int8((unsigned char)0xAE); 2075 emit_operand(as_Register(2), src); 2076 } 2077 2078 void Assembler::leal(Register dst, Address src) { 2079 InstructionMark im(this); 2080 #ifdef _LP64 2081 emit_int8(0x67); // addr32 2082 prefix(src, dst); 2083 #endif // LP64 2084 emit_int8((unsigned char)0x8D); 2085 emit_operand(dst, src); 2086 } 2087 2088 void Assembler::lfence() { 2089 emit_int8(0x0F); 2090 emit_int8((unsigned char)0xAE); 2091 emit_int8((unsigned char)0xE8); 2092 } 2093 2094 void Assembler::lock() { 2095 emit_int8((unsigned char)0xF0); 2096 } 2097 2098 void Assembler::lzcntl(Register dst, Register src) { 2099 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 2100 emit_int8((unsigned char)0xF3); 2101 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2102 emit_int8(0x0F); 2103 emit_int8((unsigned char)0xBD); 2104 emit_int8((unsigned char)(0xC0 | encode)); 2105 } 2106 2107 // Emit mfence instruction 2108 void Assembler::mfence() { 2109 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 2110 emit_int8(0x0F); 2111 emit_int8((unsigned char)0xAE); 2112 emit_int8((unsigned char)0xF0); 2113 } 2114 2115 void Assembler::mov(Register dst, Register src) { 2116 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2117 } 2118 2119 void Assembler::movapd(XMMRegister dst, XMMRegister src) { 2120 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2121 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2122 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2123 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2124 emit_int8(0x28); 2125 emit_int8((unsigned char)(0xC0 | encode)); 2126 } 2127 2128 void Assembler::movaps(XMMRegister dst, XMMRegister src) { 2129 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2130 int vector_len = VM_Version::supports_avx512novl() ?
AVX_512bit : AVX_128bit; 2131 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2132 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2133 emit_int8(0x28); 2134 emit_int8((unsigned char)(0xC0 | encode)); 2135 } 2136 2137 void Assembler::movlhps(XMMRegister dst, XMMRegister src) { 2138 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2139 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2140 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2141 emit_int8(0x16); 2142 emit_int8((unsigned char)(0xC0 | encode)); 2143 } 2144 2145 void Assembler::movb(Register dst, Address src) { 2146 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 2147 InstructionMark im(this); 2148 prefix(src, dst, true); 2149 emit_int8((unsigned char)0x8A); 2150 emit_operand(dst, src); 2151 } 2152 2153 void Assembler::movddup(XMMRegister dst, XMMRegister src) { 2154 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 2155 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_128bit; 2156 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2157 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2158 emit_int8(0x12); 2159 emit_int8(0xC0 | encode); 2160 } 2161 2162 void Assembler::kmovbl(KRegister dst, Register src) { 2163 assert(VM_Version::supports_avx512dq(), ""); 2164 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2165 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2166 emit_int8((unsigned char)0x92); 2167 emit_int8((unsigned char)(0xC0 | encode)); 2168 } 2169 2170 void Assembler::kmovbl(Register dst, KRegister src) { 2171 assert(VM_Version::supports_avx512dq(), ""); 2172 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2173 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2174 emit_int8((unsigned char)0x93); 2175 emit_int8((unsigned char)(0xC0 | encode)); 2176 } 2177 2178 void Assembler::kmovwl(KRegister dst, Register src) { 2179 assert(VM_Version::supports_evex(), ""); 2180 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2181 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2182 emit_int8((unsigned char)0x92); 2183 emit_int8((unsigned char)(0xC0 | encode)); 2184 } 2185 2186 void Assembler::kmovwl(Register dst, KRegister src) { 2187 assert(VM_Version::supports_evex(), ""); 2188 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2189 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2190 emit_int8((unsigned char)0x93); 2191 emit_int8((unsigned char)(0xC0 | encode)); 2192 } 2193 2194 void Assembler::kmovdl(KRegister dst, Register src) { 2195 assert(VM_Version::supports_avx512bw(), ""); 2196 InstructionAttr attributes(AVX_128bit, /* 
rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2197 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2198 emit_int8((unsigned char)0x92); 2199 emit_int8((unsigned char)(0xC0 | encode)); 2200 } 2201 2202 void Assembler::kmovdl(Register dst, KRegister src) { 2203 assert(VM_Version::supports_avx512bw(), ""); 2204 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2205 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2206 emit_int8((unsigned char)0x93); 2207 emit_int8((unsigned char)(0xC0 | encode)); 2208 } 2209 2210 void Assembler::kmovql(KRegister dst, KRegister src) { 2211 assert(VM_Version::supports_avx512bw(), ""); 2212 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2213 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2214 emit_int8((unsigned char)0x90); 2215 emit_int8((unsigned char)(0xC0 | encode)); 2216 } 2217 2218 void Assembler::kmovql(KRegister dst, Address src) { 2219 assert(VM_Version::supports_avx512bw(), ""); 2220 InstructionMark im(this); 2221 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2222 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2223 emit_int8((unsigned char)0x90); 2224 emit_operand((Register)dst, src); 2225 } 2226 2227 void Assembler::kmovql(Address dst, KRegister src) { 2228 assert(VM_Version::supports_avx512bw(), ""); 2229 InstructionMark im(this); 2230 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2231 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2232 emit_int8((unsigned char)0x90); 2233 emit_operand((Register)src, dst); 2234 } 2235 2236 void Assembler::kmovql(KRegister dst, Register src) { 2237 assert(VM_Version::supports_avx512bw(), ""); 2238 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2239 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2240 emit_int8((unsigned char)0x92); 2241 emit_int8((unsigned char)(0xC0 | encode)); 2242 } 2243 2244 void Assembler::kmovql(Register dst, KRegister src) { 2245 assert(VM_Version::supports_avx512bw(), ""); 2246 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2247 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2248 emit_int8((unsigned char)0x93); 2249 emit_int8((unsigned char)(0xC0 | encode)); 2250 } 2251 2252 // This instruction produces ZF or CF flags 2253 void Assembler::kortestbl(KRegister src1, KRegister src2) { 2254 assert(VM_Version::supports_avx512dq(), ""); 2255 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2256 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2257 emit_int8((unsigned char)0x98); 2258 emit_int8((unsigned char)(0xC0 | encode)); 2259 } 2260 2261 // This 
instruction produces ZF or CF flags 2262 void Assembler::kortestwl(KRegister src1, KRegister src2) { 2263 assert(VM_Version::supports_evex(), ""); 2264 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2265 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2266 emit_int8((unsigned char)0x98); 2267 emit_int8((unsigned char)(0xC0 | encode)); 2268 } 2269 2270 // This instruction produces ZF or CF flags 2271 void Assembler::kortestdl(KRegister src1, KRegister src2) { 2272 assert(VM_Version::supports_avx512bw(), ""); 2273 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2274 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2275 emit_int8((unsigned char)0x98); 2276 emit_int8((unsigned char)(0xC0 | encode)); 2277 } 2278 2279 // This instruction produces ZF or CF flags 2280 void Assembler::kortestql(KRegister src1, KRegister src2) { 2281 assert(VM_Version::supports_avx512bw(), ""); 2282 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 2283 int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 2284 emit_int8((unsigned char)0x98); 2285 emit_int8((unsigned char)(0xC0 | encode)); 2286 } 2287 2288 void Assembler::movb(Address dst, int imm8) { 2289 InstructionMark im(this); 2290 prefix(dst); 2291 emit_int8((unsigned char)0xC6); 2292 emit_operand(rax, dst, 1); 2293 emit_int8(imm8); 2294 } 2295 2296 2297 void Assembler::movb(Address dst, Register src) { 2298 assert(src->has_byte_register(), "must have byte register"); 2299 InstructionMark im(this); 2300 prefix(dst, src, true); 2301 emit_int8((unsigned char)0x88); 2302 emit_operand(src, dst); 2303 } 2304 2305 void Assembler::movdl(XMMRegister dst, Register src) { 2306 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2307 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2308 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2309 emit_int8(0x6E); 2310 emit_int8((unsigned char)(0xC0 | encode)); 2311 } 2312 2313 void Assembler::movdl(Register dst, XMMRegister src) { 2314 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2315 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2316 // swap src/dst to get correct prefix 2317 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2318 emit_int8(0x7E); 2319 emit_int8((unsigned char)(0xC0 | encode)); 2320 } 2321 2322 void Assembler::movdl(XMMRegister dst, Address src) { 2323 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2324 InstructionMark im(this); 2325 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2326 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2327 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2328 emit_int8(0x6E); 2329 emit_operand(dst, src); 2330 } 2331 2332 void Assembler::movdl(Address dst, XMMRegister src) { 2333 
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2334 InstructionMark im(this); 2335 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2336 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2337 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2338 emit_int8(0x7E); 2339 emit_operand(src, dst); 2340 } 2341 2342 void Assembler::movdqa(XMMRegister dst, XMMRegister src) { 2343 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2344 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 2345 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2346 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2347 emit_int8(0x6F); 2348 emit_int8((unsigned char)(0xC0 | encode)); 2349 } 2350 2351 void Assembler::movdqa(XMMRegister dst, Address src) { 2352 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2353 InstructionMark im(this); 2354 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2355 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2356 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2357 emit_int8(0x6F); 2358 emit_operand(dst, src); 2359 } 2360 2361 void Assembler::movdqu(XMMRegister dst, Address src) { 2362 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2363 InstructionMark im(this); 2364 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2365 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2366 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2367 emit_int8(0x6F); 2368 emit_operand(dst, src); 2369 } 2370 2371 void Assembler::movdqu(XMMRegister dst, XMMRegister src) { 2372 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2373 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2374 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2375 emit_int8(0x6F); 2376 emit_int8((unsigned char)(0xC0 | encode)); 2377 } 2378 2379 void Assembler::movdqu(Address dst, XMMRegister src) { 2380 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2381 InstructionMark im(this); 2382 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2383 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2384 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2385 emit_int8(0x7F); 2386 emit_operand(src, dst); 2387 } 2388 2389 // Move Unaligned 256bit Vector 2390 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2391 assert(UseAVX > 0, ""); 2392 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2393 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2394 emit_int8(0x6F); 2395 emit_int8((unsigned char)(0xC0 | encode)); 2396 } 2397 2398 void Assembler::vmovdqu(XMMRegister dst, Address src) { 2399 assert(UseAVX > 0, ""); 2400 
InstructionMark im(this); 2401 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2402 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2403 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2404 emit_int8(0x6F); 2405 emit_operand(dst, src); 2406 } 2407 2408 void Assembler::vmovdqu(Address dst, XMMRegister src) { 2409 assert(UseAVX > 0, ""); 2410 InstructionMark im(this); 2411 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2412 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2413 // swap src<->dst for encoding 2414 assert(src != xnoreg, "sanity"); 2415 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2416 emit_int8(0x7F); 2417 emit_operand(src, dst); 2418 } 2419 2420 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64) 2421 void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { 2422 assert(VM_Version::supports_evex(), ""); 2423 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2424 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2425 emit_int8(0x6F); 2426 emit_int8((unsigned char)(0xC0 | encode)); 2427 } 2428 2429 void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) { 2430 assert(VM_Version::supports_evex(), ""); 2431 InstructionMark im(this); 2432 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2433 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2434 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2435 emit_int8(0x6F); 2436 emit_operand(dst, src); 2437 } 2438 2439 void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) { 2440 assert(VM_Version::supports_evex(), ""); 2441 assert(src != xnoreg, "sanity"); 2442 InstructionMark im(this); 2443 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2444 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2445 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2446 emit_int8(0x7F); 2447 emit_operand(src, dst); 2448 } 2449 2450 void Assembler::evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { 2451 assert(VM_Version::supports_evex(), ""); 2452 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2453 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2454 emit_int8(0x6F); 2455 emit_int8((unsigned char)(0xC0 | encode)); 2456 } 2457 2458 void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) { 2459 assert(VM_Version::supports_evex(), ""); 2460 InstructionMark im(this); 2461 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2462 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2463 
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2464 emit_int8(0x6F); 2465 emit_operand(dst, src); 2466 } 2467 2468 void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) { 2469 assert(VM_Version::supports_evex(), ""); 2470 assert(src != xnoreg, "sanity"); 2471 InstructionMark im(this); 2472 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 2473 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2474 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2475 emit_int8(0x7F); 2476 emit_operand(src, dst); 2477 } 2478 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 2479 assert(VM_Version::supports_evex(), ""); 2480 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2481 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2482 emit_int8(0x6F); 2483 emit_int8((unsigned char)(0xC0 | encode)); 2484 } 2485 2486 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) { 2487 assert(VM_Version::supports_evex(), ""); 2488 InstructionMark im(this); 2489 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ true); 2490 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2491 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2492 emit_int8(0x6F); 2493 emit_operand(dst, src); 2494 } 2495 2496 void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) { 2497 assert(VM_Version::supports_evex(), ""); 2498 assert(src != xnoreg, "sanity"); 2499 InstructionMark im(this); 2500 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2501 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2502 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2503 emit_int8(0x7F); 2504 emit_operand(src, dst); 2505 } 2506 2507 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 2508 assert(VM_Version::supports_evex(), ""); 2509 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2510 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2511 emit_int8(0x6F); 2512 emit_int8((unsigned char)(0xC0 | encode)); 2513 } 2514 2515 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) { 2516 assert(VM_Version::supports_evex(), ""); 2517 InstructionMark im(this); 2518 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2519 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2520 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2521 emit_int8(0x6F); 2522 emit_operand(dst, src); 2523 } 2524 2525 void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) { 2526 assert(VM_Version::supports_evex(), ""); 2527 assert(src != xnoreg, "sanity"); 2528 InstructionMark im(this); 2529 InstructionAttr attributes(vector_len, /* vex_w 
*/ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 2530 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 2531 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2532 emit_int8(0x7F); 2533 emit_operand(src, dst); 2534 } 2535 2536 // Uses zero extension on 64bit 2537 2538 void Assembler::movl(Register dst, int32_t imm32) { 2539 int encode = prefix_and_encode(dst->encoding()); 2540 emit_int8((unsigned char)(0xB8 | encode)); 2541 emit_int32(imm32); 2542 } 2543 2544 void Assembler::movl(Register dst, Register src) { 2545 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2546 emit_int8((unsigned char)0x8B); 2547 emit_int8((unsigned char)(0xC0 | encode)); 2548 } 2549 2550 void Assembler::movl(Register dst, Address src) { 2551 InstructionMark im(this); 2552 prefix(src, dst); 2553 emit_int8((unsigned char)0x8B); 2554 emit_operand(dst, src); 2555 } 2556 2557 void Assembler::movl(Address dst, int32_t imm32) { 2558 InstructionMark im(this); 2559 prefix(dst); 2560 emit_int8((unsigned char)0xC7); 2561 emit_operand(rax, dst, 4); 2562 emit_int32(imm32); 2563 } 2564 2565 void Assembler::movl(Address dst, Register src) { 2566 InstructionMark im(this); 2567 prefix(dst, src); 2568 emit_int8((unsigned char)0x89); 2569 emit_operand(src, dst); 2570 } 2571 2572 // Newer CPUs require the use of movsd and movss to avoid a partial register stall 2573 // when loading from memory. But for the old Opteron, use movlpd instead of movsd. 2574 // The selection is done in MacroAssembler::movdbl() and movflt(). 2575 void Assembler::movlpd(XMMRegister dst, Address src) { 2576 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2577 InstructionMark im(this); 2578 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2579 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2580 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2581 emit_int8(0x12); 2582 emit_operand(dst, src); 2583 } 2584 2585 void Assembler::movq( MMXRegister dst, Address src ) { 2586 assert( VM_Version::supports_mmx(), "" ); 2587 emit_int8(0x0F); 2588 emit_int8(0x6F); 2589 emit_operand(dst, src); 2590 } 2591 2592 void Assembler::movq( Address dst, MMXRegister src ) { 2593 assert( VM_Version::supports_mmx(), "" ); 2594 emit_int8(0x0F); 2595 emit_int8(0x7F); 2596 // workaround gcc (3.2.1-7a) bug 2597 // In that version of gcc with only an emit_operand(MMX, Address) 2598 // gcc will tail jump and try to reverse the parameters completely 2599 // obliterating dst in the process. By having a version available 2600 // that doesn't need to swap the args at the tail jump the bug is 2601 // avoided.
2602 emit_operand(dst, src); 2603 } 2604 2605 void Assembler::movq(XMMRegister dst, Address src) { 2606 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2607 InstructionMark im(this); 2608 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2609 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2610 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2611 emit_int8(0x7E); 2612 emit_operand(dst, src); 2613 } 2614 2615 void Assembler::movq(Address dst, XMMRegister src) { 2616 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2617 InstructionMark im(this); 2618 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2619 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2620 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 2621 emit_int8((unsigned char)0xD6); 2622 emit_operand(src, dst); 2623 } 2624 2625 void Assembler::movsbl(Register dst, Address src) { // movsxb 2626 InstructionMark im(this); 2627 prefix(src, dst); 2628 emit_int8(0x0F); 2629 emit_int8((unsigned char)0xBE); 2630 emit_operand(dst, src); 2631 } 2632 2633 void Assembler::movsbl(Register dst, Register src) { // movsxb 2634 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2635 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 2636 emit_int8(0x0F); 2637 emit_int8((unsigned char)0xBE); 2638 emit_int8((unsigned char)(0xC0 | encode)); 2639 } 2640 2641 void Assembler::movsd(XMMRegister dst, XMMRegister src) { 2642 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2643 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2644 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2645 emit_int8(0x10); 2646 emit_int8((unsigned char)(0xC0 | encode)); 2647 } 2648 2649 void Assembler::movsd(XMMRegister dst, Address src) { 2650 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2651 InstructionMark im(this); 2652 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2653 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2654 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2655 emit_int8(0x10); 2656 emit_operand(dst, src); 2657 } 2658 2659 void Assembler::movsd(Address dst, XMMRegister src) { 2660 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2661 InstructionMark im(this); 2662 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2663 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2664 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2665 emit_int8(0x11); 2666 emit_operand(src, dst); 2667 } 2668 2669 void Assembler::movss(XMMRegister dst, XMMRegister src) { 2670 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2671 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2672 int encode = 
simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2673 emit_int8(0x10); 2674 emit_int8((unsigned char)(0xC0 | encode)); 2675 } 2676 2677 void Assembler::movss(XMMRegister dst, Address src) { 2678 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2679 InstructionMark im(this); 2680 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2681 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2682 simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2683 emit_int8(0x10); 2684 emit_operand(dst, src); 2685 } 2686 2687 void Assembler::movss(Address dst, XMMRegister src) { 2688 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2689 InstructionMark im(this); 2690 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2691 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2692 simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2693 emit_int8(0x11); 2694 emit_operand(src, dst); 2695 } 2696 2697 void Assembler::movswl(Register dst, Address src) { // movsxw 2698 InstructionMark im(this); 2699 prefix(src, dst); 2700 emit_int8(0x0F); 2701 emit_int8((unsigned char)0xBF); 2702 emit_operand(dst, src); 2703 } 2704 2705 void Assembler::movswl(Register dst, Register src) { // movsxw 2706 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2707 emit_int8(0x0F); 2708 emit_int8((unsigned char)0xBF); 2709 emit_int8((unsigned char)(0xC0 | encode)); 2710 } 2711 2712 void Assembler::movw(Address dst, int imm16) { 2713 InstructionMark im(this); 2714 2715 emit_int8(0x66); // switch to 16-bit mode 2716 prefix(dst); 2717 emit_int8((unsigned char)0xC7); 2718 emit_operand(rax, dst, 2); 2719 emit_int16(imm16); 2720 } 2721 2722 void Assembler::movw(Register dst, Address src) { 2723 InstructionMark im(this); 2724 emit_int8(0x66); 2725 prefix(src, dst); 2726 emit_int8((unsigned char)0x8B); 2727 emit_operand(dst, src); 2728 } 2729 2730 void Assembler::movw(Address dst, Register src) { 2731 InstructionMark im(this); 2732 emit_int8(0x66); 2733 prefix(dst, src); 2734 emit_int8((unsigned char)0x89); 2735 emit_operand(src, dst); 2736 } 2737 2738 void Assembler::movzbl(Register dst, Address src) { // movzxb 2739 InstructionMark im(this); 2740 prefix(src, dst); 2741 emit_int8(0x0F); 2742 emit_int8((unsigned char)0xB6); 2743 emit_operand(dst, src); 2744 } 2745 2746 void Assembler::movzbl(Register dst, Register src) { // movzxb 2747 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2748 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); 2749 emit_int8(0x0F); 2750 emit_int8((unsigned char)0xB6); 2751 emit_int8(0xC0 | encode); 2752 } 2753 2754 void Assembler::movzwl(Register dst, Address src) { // movzxw 2755 InstructionMark im(this); 2756 prefix(src, dst); 2757 emit_int8(0x0F); 2758 emit_int8((unsigned char)0xB7); 2759 emit_operand(dst, src); 2760 } 2761 2762 void Assembler::movzwl(Register dst, Register src) { // movzxw 2763 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2764 emit_int8(0x0F); 2765 emit_int8((unsigned char)0xB7); 2766 emit_int8(0xC0 | encode); 2767 } 2768 2769 void Assembler::mull(Address src) { 2770 InstructionMark im(this); 2771 prefix(src); 2772 emit_int8((unsigned char)0xF7); 2773 emit_operand(rsp, src); 2774 } 2775 2776 
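// Encoding note (illustrative): in the one-operand 0xF7 group the ModRM reg field is an opcode
// extension, not a register. mull(Address) above passes rsp to emit_operand only because rsp's
// encoding (4) supplies the /4 digit that selects MUL; the register form below ORs 0xE0
// (0xC0 | 4 << 3) into the ModRM byte for the same reason. The unsigned product of EAX and the
// operand lands in EDX:EAX. Compare idivl (/7 -> 0xF8), divl (/6 -> 0xF0), imull (/5 -> 0xE8),
// negl (/3 -> 0xD8) and notl (/2 -> 0xD0), which follow the same pattern.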
void Assembler::mull(Register src) { 2777 int encode = prefix_and_encode(src->encoding()); 2778 emit_int8((unsigned char)0xF7); 2779 emit_int8((unsigned char)(0xE0 | encode)); 2780 } 2781 2782 void Assembler::mulsd(XMMRegister dst, Address src) { 2783 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2784 InstructionMark im(this); 2785 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2786 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2787 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2788 emit_int8(0x59); 2789 emit_operand(dst, src); 2790 } 2791 2792 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 2793 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2794 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2795 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2796 emit_int8(0x59); 2797 emit_int8((unsigned char)(0xC0 | encode)); 2798 } 2799 2800 void Assembler::mulss(XMMRegister dst, Address src) { 2801 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2802 InstructionMark im(this); 2803 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2804 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2805 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2806 emit_int8(0x59); 2807 emit_operand(dst, src); 2808 } 2809 2810 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 2811 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2812 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 2813 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2814 emit_int8(0x59); 2815 emit_int8((unsigned char)(0xC0 | encode)); 2816 } 2817 2818 void Assembler::negl(Register dst) { 2819 int encode = prefix_and_encode(dst->encoding()); 2820 emit_int8((unsigned char)0xF7); 2821 emit_int8((unsigned char)(0xD8 | encode)); 2822 } 2823 2824 void Assembler::nop(int i) { 2825 #ifdef ASSERT 2826 assert(i > 0, " "); 2827 // The fancy nops aren't currently recognized by debuggers, making it a 2828 // pain to disassemble code while debugging. If asserts are on, clearly 2829 // speed is not an issue, so simply use the single-byte traditional nop 2830 // to do alignment.
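// For example, nop(3) in an ASSERT build emits 0x90 0x90 0x90 via the loop
// below, while a product build with UseAddressNop emits the patching-safe
// 0x66 0x66 0x90 sequence from the tables that follow.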
2831 2832 for (; i > 0 ; i--) emit_int8((unsigned char)0x90); 2833 return; 2834 2835 #endif // ASSERT 2836 2837 if (UseAddressNop && VM_Version::is_intel()) { 2838 // 2839 // Using multi-byte nops "0x0F 0x1F [address]" for Intel 2840 // 1: 0x90 2841 // 2: 0x66 0x90 2842 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2843 // 4: 0x0F 0x1F 0x40 0x00 2844 // 5: 0x0F 0x1F 0x44 0x00 0x00 2845 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2846 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2847 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2848 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2849 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2850 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2851 2852 // The rest of the coding is Intel specific - don't use consecutive address nops 2853 2854 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2855 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2856 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2857 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2858 2859 while(i >= 15) { 2860 // For Intel don't generate consecutive address nops (mix with regular nops) 2861 i -= 15; 2862 emit_int8(0x66); // size prefix 2863 emit_int8(0x66); // size prefix 2864 emit_int8(0x66); // size prefix 2865 addr_nop_8(); 2866 emit_int8(0x66); // size prefix 2867 emit_int8(0x66); // size prefix 2868 emit_int8(0x66); // size prefix 2869 emit_int8((unsigned char)0x90); 2870 // nop 2871 } 2872 switch (i) { 2873 case 14: 2874 emit_int8(0x66); // size prefix 2875 case 13: 2876 emit_int8(0x66); // size prefix 2877 case 12: 2878 addr_nop_8(); 2879 emit_int8(0x66); // size prefix 2880 emit_int8(0x66); // size prefix 2881 emit_int8(0x66); // size prefix 2882 emit_int8((unsigned char)0x90); 2883 // nop 2884 break; 2885 case 11: 2886 emit_int8(0x66); // size prefix 2887 case 10: 2888 emit_int8(0x66); // size prefix 2889 case 9: 2890 emit_int8(0x66); // size prefix 2891 case 8: 2892 addr_nop_8(); 2893 break; 2894 case 7: 2895 addr_nop_7(); 2896 break; 2897 case 6: 2898 emit_int8(0x66); // size prefix 2899 case 5: 2900 addr_nop_5(); 2901 break; 2902 case 4: 2903 addr_nop_4(); 2904 break; 2905 case 3: 2906 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 2907 emit_int8(0x66); // size prefix 2908 case 2: 2909 emit_int8(0x66); // size prefix 2910 case 1: 2911 emit_int8((unsigned char)0x90); 2912 // nop 2913 break; 2914 default: 2915 assert(i == 0, " "); 2916 } 2917 return; 2918 } 2919 if (UseAddressNop && VM_Version::is_amd()) { 2920 // 2921 // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
2922 // 1: 0x90 2923 // 2: 0x66 0x90 2924 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2925 // 4: 0x0F 0x1F 0x40 0x00 2926 // 5: 0x0F 0x1F 0x44 0x00 0x00 2927 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2928 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2929 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2930 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2931 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2932 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2933 2934 // The rest of the coding is AMD specific - use consecutive address nops 2935 2936 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2937 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2938 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2939 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2940 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2941 // Size prefixes (0x66) are added for larger sizes 2942 2943 while(i >= 22) { 2944 i -= 11; 2945 emit_int8(0x66); // size prefix 2946 emit_int8(0x66); // size prefix 2947 emit_int8(0x66); // size prefix 2948 addr_nop_8(); 2949 } 2950 // Generate first nop for size between 21-12 2951 switch (i) { 2952 case 21: 2953 i -= 1; 2954 emit_int8(0x66); // size prefix 2955 case 20: 2956 case 19: 2957 i -= 1; 2958 emit_int8(0x66); // size prefix 2959 case 18: 2960 case 17: 2961 i -= 1; 2962 emit_int8(0x66); // size prefix 2963 case 16: 2964 case 15: 2965 i -= 8; 2966 addr_nop_8(); 2967 break; 2968 case 14: 2969 case 13: 2970 i -= 7; 2971 addr_nop_7(); 2972 break; 2973 case 12: 2974 i -= 6; 2975 emit_int8(0x66); // size prefix 2976 addr_nop_5(); 2977 break; 2978 default: 2979 assert(i < 12, " "); 2980 } 2981 2982 // Generate second nop for size between 11-1 2983 switch (i) { 2984 case 11: 2985 emit_int8(0x66); // size prefix 2986 case 10: 2987 emit_int8(0x66); // size prefix 2988 case 9: 2989 emit_int8(0x66); // size prefix 2990 case 8: 2991 addr_nop_8(); 2992 break; 2993 case 7: 2994 addr_nop_7(); 2995 break; 2996 case 6: 2997 emit_int8(0x66); // size prefix 2998 case 5: 2999 addr_nop_5(); 3000 break; 3001 case 4: 3002 addr_nop_4(); 3003 break; 3004 case 3: 3005 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3006 emit_int8(0x66); // size prefix 3007 case 2: 3008 emit_int8(0x66); // size prefix 3009 case 1: 3010 emit_int8((unsigned char)0x90); 3011 // nop 3012 break; 3013 default: 3014 assert(i == 0, " "); 3015 } 3016 return; 3017 } 3018 3019 // Using nops with size prefixes "0x66 0x90".
3020 // From AMD Optimization Guide: 3021 // 1: 0x90 3022 // 2: 0x66 0x90 3023 // 3: 0x66 0x66 0x90 3024 // 4: 0x66 0x66 0x66 0x90 3025 // 5: 0x66 0x66 0x90 0x66 0x90 3026 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 3027 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 3028 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 3029 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3030 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3031 // 3032 while(i > 12) { 3033 i -= 4; 3034 emit_int8(0x66); // size prefix 3035 emit_int8(0x66); 3036 emit_int8(0x66); 3037 emit_int8((unsigned char)0x90); 3038 // nop 3039 } 3040 // 1 - 12 nops 3041 if(i > 8) { 3042 if(i > 9) { 3043 i -= 1; 3044 emit_int8(0x66); 3045 } 3046 i -= 3; 3047 emit_int8(0x66); 3048 emit_int8(0x66); 3049 emit_int8((unsigned char)0x90); 3050 } 3051 // 1 - 8 nops 3052 if(i > 4) { 3053 if(i > 6) { 3054 i -= 1; 3055 emit_int8(0x66); 3056 } 3057 i -= 3; 3058 emit_int8(0x66); 3059 emit_int8(0x66); 3060 emit_int8((unsigned char)0x90); 3061 } 3062 switch (i) { 3063 case 4: 3064 emit_int8(0x66); 3065 case 3: 3066 emit_int8(0x66); 3067 case 2: 3068 emit_int8(0x66); 3069 case 1: 3070 emit_int8((unsigned char)0x90); 3071 break; 3072 default: 3073 assert(i == 0, " "); 3074 } 3075 } 3076 3077 void Assembler::notl(Register dst) { 3078 int encode = prefix_and_encode(dst->encoding()); 3079 emit_int8((unsigned char)0xF7); 3080 emit_int8((unsigned char)(0xD0 | encode)); 3081 } 3082 3083 void Assembler::orl(Address dst, int32_t imm32) { 3084 InstructionMark im(this); 3085 prefix(dst); 3086 emit_arith_operand(0x81, rcx, dst, imm32); 3087 } 3088 3089 void Assembler::orl(Register dst, int32_t imm32) { 3090 prefix(dst); 3091 emit_arith(0x81, 0xC8, dst, imm32); 3092 } 3093 3094 void Assembler::orl(Register dst, Address src) { 3095 InstructionMark im(this); 3096 prefix(src, dst); 3097 emit_int8(0x0B); 3098 emit_operand(dst, src); 3099 } 3100 3101 void Assembler::orl(Register dst, Register src) { 3102 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3103 emit_arith(0x0B, 0xC0, dst, src); 3104 } 3105 3106 void Assembler::orl(Address dst, Register src) { 3107 InstructionMark im(this); 3108 prefix(dst, src); 3109 emit_int8(0x09); 3110 emit_operand(src, dst); 3111 } 3112 3113 void Assembler::packuswb(XMMRegister dst, Address src) { 3114 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3115 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3116 InstructionMark im(this); 3117 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3118 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 3119 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3120 emit_int8(0x67); 3121 emit_operand(dst, src); 3122 } 3123 3124 void Assembler::packuswb(XMMRegister dst, XMMRegister src) { 3125 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3126 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3127 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3128 emit_int8(0x67); 3129 emit_int8((unsigned char)(0xC0 | encode)); 3130 } 3131 3132 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3133 assert(UseAVX > 0, "some form of AVX must be enabled"); 3134 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* 

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}
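
// Contrast between the compare flavors above (illustrative, hypothetical
// register choices): the SSE/AVX forms leave 0xFF/0x00 per byte lane in an
// XMM destination, while the EVEX form deposits one bit per lane in a mask
// register:
//   vpcmpeqb(xmm2, xmm0, xmm1, Assembler::AVX_256bit); // 32 byte lanes in xmm2
//   evpcmpeqb(k1, xmm0, xmm1, Assembler::AVX_512bit);  // 64 mask bits in k1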

void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components
void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_operand(as_Register(dst_enc), src);
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}

// In this context, kdst is written with the mask used to process the equal components
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src);
}
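
// Note on the set_address_attributes() calls in the Address forms above:
// EVEX stores 8-bit displacements compressed (disp8*N), and the tuple type
// plus input size determine N per Tables 4.5/4.6. For EVEX_FV with 64-bit
// inputs at 512-bit vector length N is 64, so [base + 0x40] still encodes
// as a single disp8 of 1 rather than needing a disp32.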

void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC5);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
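
// Usage sketch (illustrative, hypothetical registers): pmovmskb condenses a
// byte-wise compare into a scalar mask that a branch or tzcnt can consume:
//   pcmpeqb(xmm0, xmm1);   // 0xFF in each matching byte lane
//   pmovmskb(rcx, xmm0);   // bit i of ecx = sign bit of lane i
//   testl(rcx, rcx);       // any lane equal?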

void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x15);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_operand(src, dst);
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
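
// Sketch of the lane addressing used by the pextr/pinsr family (registers
// hypothetical): the trailing imm8 is a lane index, not a byte offset:
//   pinsrd(xmm0, rax, 2);  // dword lane 2 of xmm0 = eax
//   pextrd(rax, xmm0, 2);  // eax = dword lane 2 of xmm0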

void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x20);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}
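
// Note: popcnt reuses opcode 0F B8 and is only defined with the mandatory
// F3 prefix, which is why 0xF3 is emitted by hand before the prefix
// computation above - a mandatory prefix must come before any REX byte.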

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif

void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_operand(dst, src);
}

void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}
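
// The mode byte of pshufd packs four 2-bit source-lane selectors, lowest
// destination lane in bits 1:0. Illustrative values:
//   pshufd(xmm0, xmm1, 0x00);  // broadcast dword lane 0
//   pshufd(xmm0, xmm1, 0x1B);  // 0b00011011: reverse the four dwords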

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  // XMM3 is for /3 encoding: 66 0F 73 /3 ib
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}

void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
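
// Sketch: psrldq and pslldq share opcode 66 0F 73; the operation is chosen
// by the ModRM reg field ("/3" shifts right, "/7" shifts left). The xmm3 and
// xmm7 arguments above exist only to plant that digit - they are not real
// operands.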

void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif

void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}
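
// Illustrative encoding note: push(Register) is the one-byte 50+rd form and
// prefix_and_encode() folds the high bit of r8-r15 into REX.B, so
//   push(rbx);  // 53
//   push(r12);  // 41 54 (64-bit only)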

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes at [edi] to the byte value in rax
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer-sized words at [edi] to the value in rax
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer-sized words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for an occurrence of rax
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
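
// Illustrative: rcll, sarl and shll pick the dedicated count==1 encoding
// (D1 /r, no immediate byte) when they can:
//   shll(rax, 1);  // D1 E0    (2 bytes)
//   shll(rax, 4);  // C1 E0 04 (3 bytes)
// shrl(Register, int) below always uses the C1 /5 ib form.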

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::stmxcsr(Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}
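
// Sketch of why subl_imm32 exists: emit_arith shortens an immediate that
// fits in a signed byte to the 83 /5 ib form, which changes the instruction
// length. A site whose constant is patched later must keep the fixed-length
// 81 /5 id form, which emit_arith_imm32 guarantees.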

void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}
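
// Note: TEST has no sign-extended imm8 form, so testl(reg, imm32) cannot be
// shortened the way emit_arith shortens other ALU immediates; the only
// compact encoding is the rax-specific A9 id opcode taken when encode == 0:
//   testl(rax, 0x10);  // A9 10 00 00 00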

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}
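
// Sketch: XBEGIN rel32 is six bytes (C7 F8 + disp32) and the displacement is
// relative to the end of the instruction. pc() above is sampled before any
// bytes are emitted, hence the "offset - 6" correction in the bound case.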

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src);
}

// AVX 3-operand scalar floating-point arithmetic instructions

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
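
// Note on the 3-operand scalar forms in this section (registers
// illustrative): nds is the non-destructive first source carried in
// VEX.vvvv, so
//   vaddsd(xmm0, xmm1, xmm2);  // low lane: xmm0 = xmm1 + xmm2
// leaves both sources intact, unlike the 2-operand SSE addsd(dst, src),
// which computes dst += src.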

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}
//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::addpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}
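// Note on the Address forms in this file: set_address_attributes() records
// the EVEX tuple type (EVEX_FV for full-vector operands, EVEX_T1S for
// scalars, etc.) and the input element size. If an EVEX prefix ends up being
// emitted, these two values select the disp8*N compression factor for
// one-byte displacements; under a plain VEX prefix they are ignored, which
// is why the memory-operand emitters set them unconditionally.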
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::mulpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}
void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}
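// Note on _legacy_mode_dq in the and/xor families: the EVEX encodings of
// VANDPS/VANDPD and VXORPS/VXORPD belong to the AVX512DQ extension. Without
// DQ the flag pins these instructions to the VEX (legacy) form; with DQ they
// may be promoted to EVEX, and the pd flavors then also set vex_w
// (hence the !_legacy_mode_dq above and below).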
void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}
void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x15);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x14);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

// Integer vector arithmetic
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x01);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x02);
  emit_int8((unsigned char)(0xC0 | encode));
}
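// Note: vphaddw/vphaddd above have no EVEX encoding (the horizontal adds
// were not carried forward into AVX-512), hence legacy_mode is forced true
// and uses_vl is false; they remain 128/256-bit only.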
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x01);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x02);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src);
}
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src);
}
void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
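// Note: vpmullq below exists only as an EVEX instruction (AVX512DQ); unlike
// vpmullw/vpmulld there is no VEX fallback, which is what the stricter
// UseAVX > 2 assert reflects.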
void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
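// Note on the immediate-count shifts that follow: in the 66 0F 71/72/73
// opcode groups the ModRM reg field is an opcode extension rather than a
// register (/2 logical right, /4 arithmetic right, /6 left). Passing
// xmm2/xmm4/xmm6 as the first argument of the prefix helpers is simply how
// that /N digit is planted in the reg field. A hand-worked sketch, not
// output captured from this assembler:
//   psllw(xmm1, 3)  =>  66 0F 71 F1 03   (F1 = 0xC0 | (6 << 3) | 1)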
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF1);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF1);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse this with the psrldq SSE2 instruction, which
  // shifts the whole 128-bit value in an xmm register by a byte count.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD1);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD1);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Shift packed integers arithmetically right by specified number of bits.
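// Note: there is no psraq/vpsraq pair here; x86 has no packed 64-bit
// arithmetic right shift before AVX-512 (VPSRAQ is EVEX-only), so only the
// word and doubleword forms are emitted below.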
5447 void Assembler::psraw(XMMRegister dst, int shift) { 5448 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5449 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5450 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 5451 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5452 emit_int8(0x71); 5453 emit_int8((unsigned char)(0xC0 | encode)); 5454 emit_int8(shift & 0xFF); 5455 } 5456 5457 void Assembler::psrad(XMMRegister dst, int shift) { 5458 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5459 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5460 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 5461 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5462 emit_int8(0x72); 5463 emit_int8((unsigned char)(0xC0 | encode)); 5464 emit_int8(shift & 0xFF); 5465 } 5466 5467 void Assembler::psraw(XMMRegister dst, XMMRegister shift) { 5468 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5469 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5470 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5471 emit_int8((unsigned char)0xE1); 5472 emit_int8((unsigned char)(0xC0 | encode)); 5473 } 5474 5475 void Assembler::psrad(XMMRegister dst, XMMRegister shift) { 5476 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5477 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5478 int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5479 emit_int8((unsigned char)0xE2); 5480 emit_int8((unsigned char)(0xC0 | encode)); 5481 } 5482 5483 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 5484 assert(UseAVX > 0, "requires some form of AVX"); 5485 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5486 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 5487 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5488 emit_int8(0x71); 5489 emit_int8((unsigned char)(0xC0 | encode)); 5490 emit_int8(shift & 0xFF); 5491 } 5492 5493 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 5494 assert(UseAVX > 0, "requires some form of AVX"); 5495 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5496 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 5497 int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5498 emit_int8(0x72); 5499 emit_int8((unsigned char)(0xC0 | encode)); 5500 emit_int8(shift & 0xFF); 5501 } 5502 5503 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 5504 assert(UseAVX > 0, "requires some form of AVX"); 5505 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5506 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5507 emit_int8((unsigned
char)0xE1); 5508 emit_int8((unsigned char)(0xC0 | encode)); 5509 } 5510 5511 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 5512 assert(UseAVX > 0, "requires some form of AVX"); 5513 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5514 int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5515 emit_int8((unsigned char)0xE2); 5516 emit_int8((unsigned char)(0xC0 | encode)); 5517 } 5518 5519 5520 // logical operations on packed integers 5521 void Assembler::pand(XMMRegister dst, XMMRegister src) { 5522 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5523 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5524 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5525 emit_int8((unsigned char)0xDB); 5526 emit_int8((unsigned char)(0xC0 | encode)); 5527 } 5528 5529 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5530 assert(UseAVX > 0, "requires some form of AVX"); 5531 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5532 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5533 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5534 emit_int8((unsigned char)0xDB); 5535 emit_int8((unsigned char)(0xC0 | encode)); 5536 } 5537 5538 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5539 assert(UseAVX > 0, "requires some form of AVX"); 5540 InstructionMark im(this); 5541 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5542 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5543 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5544 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5545 emit_int8((unsigned char)0xDB); 5546 emit_operand(dst, src); 5547 } 5548 5549 void Assembler::pandn(XMMRegister dst, XMMRegister src) { 5550 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5551 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5552 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5553 emit_int8((unsigned char)0xDF); 5554 emit_int8((unsigned char)(0xC0 | encode)); 5555 } 5556 5557 void Assembler::por(XMMRegister dst, XMMRegister src) { 5558 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5559 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5560 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5561 emit_int8((unsigned char)0xEB); 5562 emit_int8((unsigned char)(0xC0 | encode)); 5563 } 5564 5565 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5566 assert(UseAVX > 0, "requires some form of AVX"); 5567 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5568 int nds_enc = nds->is_valid() ?
nds->encoding() : 0; 5569 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5570 emit_int8((unsigned char)0xEB); 5571 emit_int8((unsigned char)(0xC0 | encode)); 5572 } 5573 5574 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5575 assert(UseAVX > 0, "requires some form of AVX"); 5576 InstructionMark im(this); 5577 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5578 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5579 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5580 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5581 emit_int8((unsigned char)0xEB); 5582 emit_operand(dst, src); 5583 } 5584 5585 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 5586 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5587 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5588 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5589 emit_int8((unsigned char)0xEF); 5590 emit_int8((unsigned char)(0xC0 | encode)); 5591 } 5592 5593 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5594 assert(UseAVX > 0, "requires some form of AVX"); 5595 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5596 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5597 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5598 emit_int8((unsigned char)0xEF); 5599 emit_int8((unsigned char)(0xC0 | encode)); 5600 } 5601 5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5603 assert(UseAVX > 0, "requires some form of AVX"); 5604 InstructionMark im(this); 5605 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5606 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5607 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5608 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5609 emit_int8((unsigned char)0xEF); 5610 emit_operand(dst, src); 5611 } 5612 5613 5614 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 5615 assert(VM_Version::supports_avx(), ""); 5616 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5617 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5618 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 5619 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5620 emit_int8(0x18); 5621 emit_int8((unsigned char)(0xC0 | encode)); 5622 // 0x00 - insert into lower 128 bits 5623 // 0x01 - insert into upper 128 bits 5624 emit_int8(imm8 & 0x01); 5625 } 5626 5627 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 5628 assert(VM_Version::supports_evex(), ""); 5629 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5630 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5631 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5632 emit_int8(0x1A); 5633 emit_int8((unsigned char)(0xC0 | encode)); 5634 // 0x00 - insert into lower 256 bits 5635 // 0x01 - insert into upper 256 bits 5636 emit_int8(imm8 & 0x01); 5637 } 5638 5639 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) { 5640 assert(VM_Version::supports_evex(), ""); 5641 assert(dst != xnoreg, "sanity"); 5642 InstructionMark im(this); 5643 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5644 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5645 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); 5646 // swap src<->dst for encoding 5647 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5648 emit_int8(0x1A); 5649 emit_operand(dst, src); 5650 // 0x00 - insert into lower 256 bits 5651 // 0x01 - insert into upper 256 bits 5652 emit_int8(imm8 & 0x01); 5653 } 5654 5655 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 5656 assert(VM_Version::supports_evex(), ""); 5657 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5658 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5659 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5660 emit_int8(0x18); 5661 emit_int8((unsigned char)(0xC0 | encode)); 5662 // 0x00 - insert into q0 128 bits (0..127) 5663 // 0x01 - insert into q1 128 bits (128..255) 5664 // 0x02 - insert into q2 128 bits (256..383) 5665 // 0x03 - insert into q3 128 bits (384..511) 5666 emit_int8(imm8 & 0x3); 5667 } 5668 5669 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) { 5670 assert(VM_Version::supports_avx(), ""); 5671 assert(dst != xnoreg, "sanity"); 5672 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5673 int nds_enc = nds->is_valid() ?
nds->encoding() : 0; 5674 InstructionMark im(this); 5675 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5676 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5677 // swap src<->dst for encoding 5678 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5679 emit_int8(0x18); 5680 emit_operand(dst, src); 5681 // 0x00 - insert into q0 128 bits (0..127) 5682 // 0x01 - insert into q1 128 bits (128..255) 5683 // 0x02 - insert into q2 128 bits (256..383) 5684 // 0x03 - insert into q3 128 bits (384..511) 5685 emit_int8(imm8 & 0x3); 5686 } 5687 5688 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, int imm8) { 5689 assert(VM_Version::supports_avx(), ""); 5690 assert(dst != xnoreg, "sanity"); 5691 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5692 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5693 InstructionMark im(this); 5694 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5695 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5696 // swap src<->dst for encoding 5697 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5698 emit_int8(0x18); 5699 emit_operand(dst, src); 5700 // 0x00 - insert into lower 128 bits 5701 // 0x01 - insert into upper 128 bits 5702 emit_int8(imm8 & 0x01); 5703 } 5704 5705 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, int imm8) { 5706 assert(VM_Version::supports_avx(), ""); 5707 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5708 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5709 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5710 emit_int8(0x19); 5711 emit_int8((unsigned char)(0xC0 | encode)); 5712 // 0x00 - extract from lower 128 bits 5713 // 0x01 - extract from upper 128 bits 5714 emit_int8(imm8 & 0x01); 5715 } 5716 5717 void Assembler::vextractf128(Address dst, XMMRegister src, int imm8) { 5718 assert(VM_Version::supports_avx(), ""); 5719 assert(src != xnoreg, "sanity"); 5720 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5721 InstructionMark im(this); 5722 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5723 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5724 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5725 emit_int8(0x19); 5726 emit_operand(src, dst); 5727 // 0x00 - extract from lower 128 bits 5728 // 0x01 - extract from upper 128 bits 5729 emit_int8(imm8 & 0x01); 5730 } 5731 5732 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 5733 assert(VM_Version::supports_avx2(), ""); 5734 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5735 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5736 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 5737 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5738 emit_int8(0x38); 5739 emit_int8((unsigned char)(0xC0 | encode)); 5740 // 0x00 - insert into lower 128 bits 5741 // 0x01 - insert into upper 128 bits 5742 emit_int8(imm8 & 0x01); 5743 } 5744 5745 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 5746 assert(VM_Version::supports_evex(), ""); 5747 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5748 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5749 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5750 emit_int8(0x38); 5751 emit_int8((unsigned char)(0xC0 | encode)); 5752 // 0x00 - insert into lower 256 bits 5753 // 0x01 - insert into upper 256 bits 5754 emit_int8(imm8 & 0x01); 5755 } 5756 5757 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) { 5758 assert(VM_Version::supports_avx2(), ""); 5759 assert(dst != xnoreg, "sanity"); 5760 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5761 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5762 InstructionMark im(this); 5763 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5764 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5765 // swap src<->dst for encoding 5766 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5767 emit_int8(0x38); 5768 emit_operand(dst, src); 5769 // 0x00 - insert into lower 128 bits 5770 // 0x01 - insert into upper 128 bits 5771 emit_int8(imm8 & 0x01); 5772 } 5773 5774 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, int imm8) { 5775 assert(VM_Version::supports_avx(), ""); 5776 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; 5777 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5778 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5779 emit_int8(0x39); 5780 emit_int8((unsigned char)(0xC0 | encode)); 5781 // 0x00 - extract from lower 128 bits 5782 // 0x01 - extract from upper 128 bits 5783 emit_int8(imm8 & 0x01); 5784 } 5785 5786 void Assembler::vextracti128(Address dst, XMMRegister src, int imm8) { 5787 assert(VM_Version::supports_avx2(), ""); 5788 assert(src != xnoreg, "sanity"); 5789 int vector_len = VM_Version::supports_evex() ? 
AVX_512bit : AVX_256bit; 5790 InstructionMark im(this); 5791 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5792 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5793 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5794 emit_int8(0x39); 5795 emit_operand(src, dst); 5796 // 0x00 - extract from lower 128 bits 5797 // 0x01 - extract from upper 128 bits 5798 emit_int8(imm8 & 0x01); 5799 } 5800 5801 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, int imm8) { 5802 assert(VM_Version::supports_evex(), ""); 5803 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5804 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5805 emit_int8(0x3B); 5806 emit_int8((unsigned char)(0xC0 | encode)); 5807 // 0x00 - extract from lower 256 bits 5808 // 0x01 - extract from upper 256 bits 5809 emit_int8(imm8 & 0x01); 5810 } 5811 5812 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, int imm8) { 5813 assert(VM_Version::supports_evex(), ""); 5814 InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5815 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5816 emit_int8(0x39); 5817 emit_int8((unsigned char)(0xC0 | encode)); 5818 // 0x00 - extract from bits 127:0 5819 // 0x01 - extract from bits 255:128 5820 // 0x02 - extract from bits 383:256 5821 // 0x03 - extract from bits 511:384 5822 emit_int8(imm8 & 0x3); 5823 } 5824 5825 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, int imm8) { 5826 assert(VM_Version::supports_evex(), ""); 5827 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5828 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5829 emit_int8(0x1B); 5830 emit_int8((unsigned char)(0xC0 | encode)); 5831 // 0x00 - extract from lower 256 bits 5832 // 0x01 - extract from upper 256 bits 5833 emit_int8(imm8 & 0x1); 5834 } 5835 5836 void Assembler::vextractf64x4(Address dst, XMMRegister src, int imm8) { 5837 assert(VM_Version::supports_evex(), ""); 5838 assert(src != xnoreg, "sanity"); 5839 InstructionMark im(this); 5840 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5841 attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit); 5842 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5843 emit_int8(0x1B); 5844 emit_operand(src, dst); 5845 // 0x00 - extract from lower 256 bits 5846 // 0x01 - extract from upper 256 bits 5847 emit_int8(imm8 & 0x01); 5848 } 5849 5850 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, int imm8) { 5851 assert(VM_Version::supports_avx(), ""); 5852 int vector_len = VM_Version::supports_evex() ? 
AVX_512bit : AVX_256bit; 5853 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5854 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5855 emit_int8(0x19); 5856 emit_int8((unsigned char)(0xC0 | encode)); 5857 // 0x00 - extract from bits 127:0 5858 // 0x01 - extract from bits 255:128 5859 // 0x02 - extract from bits 383:256 5860 // 0x03 - extract from bits 511:384 5861 emit_int8(imm8 & 0x3); 5862 } 5863 5864 void Assembler::vextractf32x4(Address dst, XMMRegister src, int imm8) { 5865 assert(VM_Version::supports_evex(), ""); 5866 assert(src != xnoreg, "sanity"); 5867 InstructionMark im(this); 5868 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5869 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); 5870 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5871 emit_int8(0x19); 5872 emit_operand(src, dst); 5873 // 0x00 - extract from bits 127:0 5874 // 0x01 - extract from bits 255:128 5875 // 0x02 - extract from bits 383:256 5876 // 0x03 - extract from bits 511:384 5877 emit_int8(imm8 & 0x3); 5878 } 5879 5880 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, int imm8) { 5881 assert(VM_Version::supports_evex(), ""); 5882 InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); 5883 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5884 emit_int8(0x19); 5885 emit_int8((unsigned char)(0xC0 | encode)); 5886 // 0x00 - extract from bits 127:0 5887 // 0x01 - extract from bits 255:128 5888 // 0x02 - extract from bits 383:256 5889 // 0x03 - extract from bits 511:384 5890 emit_int8(imm8 & 0x3); 5891 } 5892 5893 // duplicate 4-byte integer data from src into 8 locations in dest 5894 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) { 5895 assert(VM_Version::supports_avx2(), ""); 5896 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5897 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5898 emit_int8(0x58); 5899 emit_int8((unsigned char)(0xC0 | encode)); 5900 } 5901 5902 // duplicate 2-byte integer data from src into 16 locations in dest 5903 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) { 5904 assert(VM_Version::supports_avx2(), ""); 5905 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5906 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5907 emit_int8(0x79); 5908 emit_int8((unsigned char)(0xC0 | encode)); 5909 } 5910 5911 // duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL 5912 void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) { 5913 assert(VM_Version::supports_evex(), ""); 5914 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5915 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38,
&attributes); 5916 emit_int8(0x78); 5917 emit_int8((unsigned char)(0xC0 | encode)); 5918 } 5919 5920 void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) { 5921 assert(VM_Version::supports_evex(), ""); 5922 assert(dst != xnoreg, "sanity"); 5923 InstructionMark im(this); 5924 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5925 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 5926 // swap src<->dst for encoding 5927 vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5928 emit_int8(0x78); 5929 emit_operand(dst, src); 5930 } 5931 5932 // duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL 5933 void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { 5934 assert(VM_Version::supports_evex(), ""); 5935 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5936 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5937 emit_int8(0x79); 5938 emit_int8((unsigned char)(0xC0 | encode)); 5939 } 5940 5941 void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) { 5942 assert(VM_Version::supports_evex(), ""); 5943 assert(dst != xnoreg, "sanity"); 5944 InstructionMark im(this); 5945 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5946 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 5947 // swap src<->dst for encoding 5948 vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5949 emit_int8(0x79); 5950 emit_operand(dst, src); 5951 } 5952 5953 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5954 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { 5955 assert(VM_Version::supports_evex(), ""); 5956 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5957 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5958 emit_int8(0x58); 5959 emit_int8((unsigned char)(0xC0 | encode)); 5960 } 5961 5962 void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) { 5963 assert(VM_Version::supports_evex(), ""); 5964 assert(dst != xnoreg, "sanity"); 5965 InstructionMark im(this); 5966 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5967 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 5968 // swap src<->dst for encoding 5969 vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5970 emit_int8(0x58); 5971 emit_operand(dst, src); 5972 } 5973 5974 // duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL 5975 void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { 5976 assert(VM_Version::supports_evex(), ""); 5977 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5978 int
encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5979 emit_int8(0x59); 5980 emit_int8((unsigned char)(0xC0 | encode)); 5981 } 5982 5983 void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) { 5984 assert(VM_Version::supports_evex(), ""); 5985 assert(dst != xnoreg, "sanity"); 5986 InstructionMark im(this); 5987 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5988 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 5989 // swap src<->dst for encoding 5990 vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5991 emit_int8(0x59); 5992 emit_operand(dst, src); 5993 } 5994 5995 // duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL 5996 void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { 5997 assert(VM_Version::supports_evex(), ""); 5998 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 5999 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6000 emit_int8(0x18); 6001 emit_int8((unsigned char)(0xC0 | encode)); 6002 } 6003 6004 void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) { 6005 assert(VM_Version::supports_evex(), ""); 6006 assert(dst != xnoreg, "sanity"); 6007 InstructionMark im(this); 6008 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6009 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 6010 // swap src<->dst for encoding 6011 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6012 emit_int8(0x18); 6013 emit_operand(dst, src); 6014 } 6015 6016 // duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL 6017 void Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { 6018 assert(VM_Version::supports_evex(), ""); 6019 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6020 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6021 emit_int8(0x19); 6022 emit_int8((unsigned char)(0xC0 | encode)); 6023 } 6024 6025 void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) { 6026 assert(VM_Version::supports_evex(), ""); 6027 assert(dst != xnoreg, "sanity"); 6028 InstructionMark im(this); 6029 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6030 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 6031 // swap src<->dst for encoding 6032 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6033 emit_int8(0x19); 6034 emit_operand(dst, src); 6035 } 6036 6037 // duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL 6038 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { 6039 assert(VM_Version::supports_evex(), ""); 6040 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /*
no_mask_reg */ true, /* uses_vl */ true); 6041 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6042 emit_int8(0x7A); 6043 emit_int8((unsigned char)(0xC0 | encode)); 6044 } 6045 6046 // duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL 6047 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { 6048 assert(VM_Version::supports_evex(), ""); 6049 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 6050 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6051 emit_int8(0x7B); 6052 emit_int8((unsigned char)(0xC0 | encode)); 6053 } 6054 6055 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 6056 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { 6057 assert(VM_Version::supports_evex(), ""); 6058 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6059 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6060 emit_int8(0x7C); 6061 emit_int8((unsigned char)(0xC0 | encode)); 6062 } 6063 6064 // duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL 6065 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { 6066 assert(VM_Version::supports_evex(), ""); 6067 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 6068 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 6069 emit_int8(0x7C); 6070 emit_int8((unsigned char)(0xC0 | encode)); 6071 } 6072 6073 // Carry-Less Multiplication Quadword 6074 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { 6075 assert(VM_Version::supports_clmul(), ""); 6076 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6077 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6078 emit_int8(0x44); 6079 emit_int8((unsigned char)(0xC0 | encode)); 6080 emit_int8((unsigned char)mask); 6081 } 6082 6083 // Carry-Less Multiplication Quadword 6084 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 6085 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 6086 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6087 int nds_enc = nds->is_valid() ?
nds->encoding() : 0; 6088 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6089 emit_int8(0x44); 6090 emit_int8((unsigned char)(0xC0 | encode)); 6091 emit_int8((unsigned char)mask); 6092 } 6093 6094 void Assembler::vzeroupper() { 6095 assert(VM_Version::supports_avx(), ""); 6096 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6097 (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 6098 emit_int8(0x77); 6099 } 6100 6101 6102 #ifndef _LP64 6103 // 32bit only pieces of the assembler 6104 6105 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { 6106 // NO PREFIX AS NEVER 64BIT 6107 InstructionMark im(this); 6108 emit_int8((unsigned char)0x81); 6109 emit_int8((unsigned char)(0xF8 | src1->encoding())); 6110 emit_data(imm32, rspec, 0); 6111 } 6112 6113 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { 6114 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs) 6115 InstructionMark im(this); 6116 emit_int8((unsigned char)0x81); 6117 emit_operand(rdi, src1); 6118 emit_data(imm32, rspec, 0); 6119 } 6120 6121 // The 64-bit cmpxchg (cmpxchg8b; 32-bit platforms only) compares the value at adr with the contents of rdx:rax, 6122 // and stores rcx:rbx into adr if they are equal; otherwise, the value at adr is loaded 6123 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. 6124 void Assembler::cmpxchg8(Address adr) { 6125 InstructionMark im(this); 6126 emit_int8(0x0F); 6127 emit_int8((unsigned char)0xC7); 6128 emit_operand(rcx, adr); 6129 } 6130 6131 void Assembler::decl(Register dst) { 6132 // Don't use it directly. Use MacroAssembler::decrementl() instead.
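// Single-byte form: 0x48 + rd encodes DEC r32. In 64-bit mode the bytes 0x48..0x4F are
// REX prefixes, which is why this form exists only on 32-bit.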
6133 emit_int8(0x48 | dst->encoding()); 6134 } 6135 6136 #endif // _LP64 6137 6138 // 64bit typically doesn't use the x87 but needs to for the trig funcs 6139 6140 void Assembler::fabs() { 6141 emit_int8((unsigned char)0xD9); 6142 emit_int8((unsigned char)0xE1); 6143 } 6144 6145 void Assembler::fadd(int i) { 6146 emit_farith(0xD8, 0xC0, i); 6147 } 6148 6149 void Assembler::fadd_d(Address src) { 6150 InstructionMark im(this); 6151 emit_int8((unsigned char)0xDC); 6152 emit_operand32(rax, src); 6153 } 6154 6155 void Assembler::fadd_s(Address src) { 6156 InstructionMark im(this); 6157 emit_int8((unsigned char)0xD8); 6158 emit_operand32(rax, src); 6159 } 6160 6161 void Assembler::fadda(int i) { 6162 emit_farith(0xDC, 0xC0, i); 6163 } 6164 6165 void Assembler::faddp(int i) { 6166 emit_farith(0xDE, 0xC0, i); 6167 } 6168 6169 void Assembler::fchs() { 6170 emit_int8((unsigned char)0xD9); 6171 emit_int8((unsigned char)0xE0); 6172 } 6173 6174 void Assembler::fcom(int i) { 6175 emit_farith(0xD8, 0xD0, i); 6176 } 6177 6178 void Assembler::fcomp(int i) { 6179 emit_farith(0xD8, 0xD8, i); 6180 } 6181 6182 void Assembler::fcomp_d(Address src) { 6183 InstructionMark im(this); 6184 emit_int8((unsigned char)0xDC); 6185 emit_operand32(rbx, src); 6186 } 6187 6188 void Assembler::fcomp_s(Address src) { 6189 InstructionMark im(this); 6190 emit_int8((unsigned char)0xD8); 6191 emit_operand32(rbx, src); 6192 } 6193 6194 void Assembler::fcompp() { 6195 emit_int8((unsigned char)0xDE); 6196 emit_int8((unsigned char)0xD9); 6197 } 6198 6199 void Assembler::fcos() { 6200 emit_int8((unsigned char)0xD9); 6201 emit_int8((unsigned char)0xFF); 6202 } 6203 6204 void Assembler::fdecstp() { 6205 emit_int8((unsigned char)0xD9); 6206 emit_int8((unsigned char)0xF6); 6207 } 6208 6209 void Assembler::fdiv(int i) { 6210 emit_farith(0xD8, 0xF0, i); 6211 } 6212 6213 void Assembler::fdiv_d(Address src) { 6214 InstructionMark im(this); 6215 emit_int8((unsigned char)0xDC); 6216 emit_operand32(rsi, src); 6217 } 6218 6219 void Assembler::fdiv_s(Address src) { 6220 InstructionMark im(this); 6221 emit_int8((unsigned char)0xD8); 6222 emit_operand32(rsi, src); 6223 } 6224 6225 void Assembler::fdiva(int i) { 6226 emit_farith(0xDC, 0xF8, i); 6227 } 6228 6229 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994) 6230 // is erroneous for some of the floating-point instructions below. 
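// In particular, the manual documents the operand order of some reversed/popping forms
// (fdivp/fdivrp, fsubp/fsubrp) backwards; the emit_farith() opcodes below follow the
// actual hardware behavior, as flagged by the '(Intel manual wrong)' notes.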
6231 6232 void Assembler::fdivp(int i) { 6233 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) 6234 } 6235 6236 void Assembler::fdivr(int i) { 6237 emit_farith(0xD8, 0xF8, i); 6238 } 6239 6240 void Assembler::fdivr_d(Address src) { 6241 InstructionMark im(this); 6242 emit_int8((unsigned char)0xDC); 6243 emit_operand32(rdi, src); 6244 } 6245 6246 void Assembler::fdivr_s(Address src) { 6247 InstructionMark im(this); 6248 emit_int8((unsigned char)0xD8); 6249 emit_operand32(rdi, src); 6250 } 6251 6252 void Assembler::fdivra(int i) { 6253 emit_farith(0xDC, 0xF0, i); 6254 } 6255 6256 void Assembler::fdivrp(int i) { 6257 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) 6258 } 6259 6260 void Assembler::ffree(int i) { 6261 emit_farith(0xDD, 0xC0, i); 6262 } 6263 6264 void Assembler::fild_d(Address adr) { 6265 InstructionMark im(this); 6266 emit_int8((unsigned char)0xDF); 6267 emit_operand32(rbp, adr); 6268 } 6269 6270 void Assembler::fild_s(Address adr) { 6271 InstructionMark im(this); 6272 emit_int8((unsigned char)0xDB); 6273 emit_operand32(rax, adr); 6274 } 6275 6276 void Assembler::fincstp() { 6277 emit_int8((unsigned char)0xD9); 6278 emit_int8((unsigned char)0xF7); 6279 } 6280 6281 void Assembler::finit() { 6282 emit_int8((unsigned char)0x9B); 6283 emit_int8((unsigned char)0xDB); 6284 emit_int8((unsigned char)0xE3); 6285 } 6286 6287 void Assembler::fist_s(Address adr) { 6288 InstructionMark im(this); 6289 emit_int8((unsigned char)0xDB); 6290 emit_operand32(rdx, adr); 6291 } 6292 6293 void Assembler::fistp_d(Address adr) { 6294 InstructionMark im(this); 6295 emit_int8((unsigned char)0xDF); 6296 emit_operand32(rdi, adr); 6297 } 6298 6299 void Assembler::fistp_s(Address adr) { 6300 InstructionMark im(this); 6301 emit_int8((unsigned char)0xDB); 6302 emit_operand32(rbx, adr); 6303 } 6304 6305 void Assembler::fld1() { 6306 emit_int8((unsigned char)0xD9); 6307 emit_int8((unsigned char)0xE8); 6308 } 6309 6310 void Assembler::fld_d(Address adr) { 6311 InstructionMark im(this); 6312 emit_int8((unsigned char)0xDD); 6313 emit_operand32(rax, adr); 6314 } 6315 6316 void Assembler::fld_s(Address adr) { 6317 InstructionMark im(this); 6318 emit_int8((unsigned char)0xD9); 6319 emit_operand32(rax, adr); 6320 } 6321 6322 6323 void Assembler::fld_s(int index) { 6324 emit_farith(0xD9, 0xC0, index); 6325 } 6326 6327 void Assembler::fld_x(Address adr) { 6328 InstructionMark im(this); 6329 emit_int8((unsigned char)0xDB); 6330 emit_operand32(rbp, adr); 6331 } 6332 6333 void Assembler::fldcw(Address src) { 6334 InstructionMark im(this); 6335 emit_int8((unsigned char)0xD9); 6336 emit_operand32(rbp, src); 6337 } 6338 6339 void Assembler::fldenv(Address src) { 6340 InstructionMark im(this); 6341 emit_int8((unsigned char)0xD9); 6342 emit_operand32(rsp, src); 6343 } 6344 6345 void Assembler::fldlg2() { 6346 emit_int8((unsigned char)0xD9); 6347 emit_int8((unsigned char)0xEC); 6348 } 6349 6350 void Assembler::fldln2() { 6351 emit_int8((unsigned char)0xD9); 6352 emit_int8((unsigned char)0xED); 6353 } 6354 6355 void Assembler::fldz() { 6356 emit_int8((unsigned char)0xD9); 6357 emit_int8((unsigned char)0xEE); 6358 } 6359 6360 void Assembler::flog() { 6361 fldln2(); 6362 fxch(); 6363 fyl2x(); 6364 } 6365 6366 void Assembler::flog10() { 6367 fldlg2(); 6368 fxch(); 6369 fyl2x(); 6370 } 6371 6372 void Assembler::fmul(int i) { 6373 emit_farith(0xD8, 0xC8, i); 6374 } 6375 6376 void Assembler::fmul_d(Address src) { 6377 InstructionMark im(this); 6378 emit_int8((unsigned 
char)0xDC); 6379 emit_operand32(rcx, src); 6380 } 6381 6382 void Assembler::fmul_s(Address src) { 6383 InstructionMark im(this); 6384 emit_int8((unsigned char)0xD8); 6385 emit_operand32(rcx, src); 6386 } 6387 6388 void Assembler::fmula(int i) { 6389 emit_farith(0xDC, 0xC8, i); 6390 } 6391 6392 void Assembler::fmulp(int i) { 6393 emit_farith(0xDE, 0xC8, i); 6394 } 6395 6396 void Assembler::fnsave(Address dst) { 6397 InstructionMark im(this); 6398 emit_int8((unsigned char)0xDD); 6399 emit_operand32(rsi, dst); 6400 } 6401 6402 void Assembler::fnstcw(Address src) { 6403 InstructionMark im(this); 6404 emit_int8((unsigned char)0x9B); 6405 emit_int8((unsigned char)0xD9); 6406 emit_operand32(rdi, src); 6407 } 6408 6409 void Assembler::fnstsw_ax() { 6410 emit_int8((unsigned char)0xDF); 6411 emit_int8((unsigned char)0xE0); 6412 } 6413 6414 void Assembler::fprem() { 6415 emit_int8((unsigned char)0xD9); 6416 emit_int8((unsigned char)0xF8); 6417 } 6418 6419 void Assembler::fprem1() { 6420 emit_int8((unsigned char)0xD9); 6421 emit_int8((unsigned char)0xF5); 6422 } 6423 6424 void Assembler::frstor(Address src) { 6425 InstructionMark im(this); 6426 emit_int8((unsigned char)0xDD); 6427 emit_operand32(rsp, src); 6428 } 6429 6430 void Assembler::fsin() { 6431 emit_int8((unsigned char)0xD9); 6432 emit_int8((unsigned char)0xFE); 6433 } 6434 6435 void Assembler::fsqrt() { 6436 emit_int8((unsigned char)0xD9); 6437 emit_int8((unsigned char)0xFA); 6438 } 6439 6440 void Assembler::fst_d(Address adr) { 6441 InstructionMark im(this); 6442 emit_int8((unsigned char)0xDD); 6443 emit_operand32(rdx, adr); 6444 } 6445 6446 void Assembler::fst_s(Address adr) { 6447 InstructionMark im(this); 6448 emit_int8((unsigned char)0xD9); 6449 emit_operand32(rdx, adr); 6450 } 6451 6452 void Assembler::fstp_d(Address adr) { 6453 InstructionMark im(this); 6454 emit_int8((unsigned char)0xDD); 6455 emit_operand32(rbx, adr); 6456 } 6457 6458 void Assembler::fstp_d(int index) { 6459 emit_farith(0xDD, 0xD8, index); 6460 } 6461 6462 void Assembler::fstp_s(Address adr) { 6463 InstructionMark im(this); 6464 emit_int8((unsigned char)0xD9); 6465 emit_operand32(rbx, adr); 6466 } 6467 6468 void Assembler::fstp_x(Address adr) { 6469 InstructionMark im(this); 6470 emit_int8((unsigned char)0xDB); 6471 emit_operand32(rdi, adr); 6472 } 6473 6474 void Assembler::fsub(int i) { 6475 emit_farith(0xD8, 0xE0, i); 6476 } 6477 6478 void Assembler::fsub_d(Address src) { 6479 InstructionMark im(this); 6480 emit_int8((unsigned char)0xDC); 6481 emit_operand32(rsp, src); 6482 } 6483 6484 void Assembler::fsub_s(Address src) { 6485 InstructionMark im(this); 6486 emit_int8((unsigned char)0xD8); 6487 emit_operand32(rsp, src); 6488 } 6489 6490 void Assembler::fsuba(int i) { 6491 emit_farith(0xDC, 0xE8, i); 6492 } 6493 6494 void Assembler::fsubp(int i) { 6495 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) 6496 } 6497 6498 void Assembler::fsubr(int i) { 6499 emit_farith(0xD8, 0xE8, i); 6500 } 6501 6502 void Assembler::fsubr_d(Address src) { 6503 InstructionMark im(this); 6504 emit_int8((unsigned char)0xDC); 6505 emit_operand32(rbp, src); 6506 } 6507 6508 void Assembler::fsubr_s(Address src) { 6509 InstructionMark im(this); 6510 emit_int8((unsigned char)0xD8); 6511 emit_operand32(rbp, src); 6512 } 6513 6514 void Assembler::fsubra(int i) { 6515 emit_farith(0xDC, 0xE0, i); 6516 } 6517 6518 void Assembler::fsubrp(int i) { 6519 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) 6520 } 6521 6522 void 
Assembler::ftan() { 6523 emit_int8((unsigned char)0xD9); 6524 emit_int8((unsigned char)0xF2); 6525 emit_int8((unsigned char)0xDD); 6526 emit_int8((unsigned char)0xD8); 6527 } 6528 6529 void Assembler::ftst() { 6530 emit_int8((unsigned char)0xD9); 6531 emit_int8((unsigned char)0xE4); 6532 } 6533 6534 void Assembler::fucomi(int i) { 6535 // make sure the instruction is supported (introduced for P6, together with cmov) 6536 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 6537 emit_farith(0xDB, 0xE8, i); 6538 } 6539 6540 void Assembler::fucomip(int i) { 6541 // make sure the instruction is supported (introduced for P6, together with cmov) 6542 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 6543 emit_farith(0xDF, 0xE8, i); 6544 } 6545 6546 void Assembler::fwait() { 6547 emit_int8((unsigned char)0x9B); 6548 } 6549 6550 void Assembler::fxch(int i) { 6551 emit_farith(0xD9, 0xC8, i); 6552 } 6553 6554 void Assembler::fyl2x() { 6555 emit_int8((unsigned char)0xD9); 6556 emit_int8((unsigned char)0xF1); 6557 } 6558 6559 void Assembler::frndint() { 6560 emit_int8((unsigned char)0xD9); 6561 emit_int8((unsigned char)0xFC); 6562 } 6563 6564 void Assembler::f2xm1() { 6565 emit_int8((unsigned char)0xD9); 6566 emit_int8((unsigned char)0xF0); 6567 } 6568 6569 void Assembler::fldl2e() { 6570 emit_int8((unsigned char)0xD9); 6571 emit_int8((unsigned char)0xEA); 6572 } 6573 6574 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. 6575 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 }; 6576 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding. 6577 static int simd_opc[4] = { 0, 0, 0x38, 0x3A }; 6578 6579 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding. 6580 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 6581 if (pre > 0) { 6582 emit_int8(simd_pre[pre]); 6583 } 6584 if (rex_w) { 6585 prefixq(adr, xreg); 6586 } else { 6587 prefix(adr, xreg); 6588 } 6589 if (opc > 0) { 6590 emit_int8(0x0F); 6591 int opc2 = simd_opc[opc]; 6592 if (opc2 > 0) { 6593 emit_int8(opc2); 6594 } 6595 } 6596 } 6597 6598 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 6599 if (pre > 0) { 6600 emit_int8(simd_pre[pre]); 6601 } 6602 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc); 6603 if (opc > 0) { 6604 emit_int8(0x0F); 6605 int opc2 = simd_opc[opc]; 6606 if (opc2 > 0) { 6607 emit_int8(opc2); 6608 } 6609 } 6610 return encode; 6611 } 6612 6613 6614 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) { 6615 int vector_len = _attributes->get_vector_len(); 6616 bool vex_w = _attributes->is_rex_vex_w(); 6617 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { 6618 prefix(VEX_3bytes); 6619 6620 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); 6621 byte1 = (~byte1) & 0xE0; 6622 byte1 |= opc; 6623 emit_int8(byte1); 6624 6625 int byte2 = ((~nds_enc) & 0xf) << 3; 6626 byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre; 6627 emit_int8(byte2); 6628 } else { 6629 prefix(VEX_2bytes); 6630 6631 int byte1 = vex_r ? VEX_R : 0; 6632 byte1 = (~byte1) & 0x80; 6633 byte1 |= ((~nds_enc) & 0xf) << 3; 6634 byte1 |= ((vector_len > 0 ) ? 
4 : 0) | pre; 6635 emit_int8(byte1); 6636 } 6637 } 6638 6639 // This is a 4 byte encoding 6640 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){ 6641 // EVEX 0x62 prefix 6642 prefix(EVEX_4bytes); 6643 bool vex_w = _attributes->is_rex_vex_w(); 6644 int evex_encoding = (vex_w ? VEX_W : 0); 6645 // EVEX.b is not currently used for broadcast of single element or data rounding modes 6646 _attributes->set_evex_encoding(evex_encoding); 6647 6648 // P0: byte 2, initialized to RXBR`00mm 6649 // (the R/X/B/R' bits are gathered positively and complemented below, since the encoding stores them not'd) 6650 int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0); 6651 byte2 = (~byte2) & 0xF0; 6652 // confine opc opcode extensions in mm bits to lower two bits 6653 // of form {0F, 0F_38, 0F_3A} 6654 byte2 |= opc; 6655 emit_int8(byte2); 6656 6657 // P1: byte 3 as Wvvvv1pp 6658 int byte3 = ((~nds_enc) & 0xf) << 3; 6659 // p[10] is always 1 6660 byte3 |= EVEX_F; 6661 byte3 |= (vex_w & 1) << 7; 6662 // confine pre opcode extensions in pp bits to lower two bits 6663 // of form {66, F3, F2} 6664 byte3 |= pre; 6665 emit_int8(byte3); 6666 6667 // P2: byte 4 as zL'LbV'aaa 6668 int byte4 = (_attributes->is_no_reg_mask()) ? 0 : 1; // kregs are encoded in the low 3 bits as aaa (hard code k1; it will be initialized for now) 6669 // EVEX.v` for extending EVEX.vvvv or VIDX 6670 byte4 |= (evex_v ? 0: EVEX_V); 6671 // third EVEX.b for broadcast actions 6672 byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0); 6673 // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024 6674 byte4 |= ((_attributes->get_vector_len())& 0x3) << 5; 6675 // last is EVEX.z for zero/merge actions 6676 byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0); 6677 emit_int8(byte4); 6678 } 6679 6680 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { 6681 bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0; 6682 bool vex_b = adr.base_needs_rex(); 6683 bool vex_x = adr.index_needs_rex(); 6684 set_attributes(attributes); 6685 attributes->set_current_assembler(this); 6686 6687 // if vector length is turned off, revert to AVX for vectors smaller than 512-bit 6688 if ((UseAVX > 2) && _legacy_mode_vl && attributes->uses_vl()) { 6689 switch (attributes->get_vector_len()) { 6690 case AVX_128bit: 6691 case AVX_256bit: 6692 attributes->set_is_legacy_mode(); 6693 break; 6694 } 6695 } 6696 6697 if ((UseAVX > 2) && !attributes->is_legacy_mode()) 6698 { 6699 bool evex_r = (xreg_enc >= 16); 6700 bool evex_v = (nds_enc >= 16); 6701 attributes->set_is_evex_instruction(); 6702 evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc); 6703 } else { 6704 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); 6705 } 6706 } 6707 6708 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { 6709 bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0; 6710 bool vex_b = ((src_enc & 8) == 8) ?
1 : 0; 6711 bool vex_x = false; 6712 set_attributes(attributes); 6713 attributes->set_current_assembler(this); 6714 6715 // if vector length is turned off, revert to AVX for vectors smaller than 512-bit 6716 if ((UseAVX > 2) && _legacy_mode_vl && attributes->uses_vl()) { 6717 switch (attributes->get_vector_len()) { 6718 case AVX_128bit: 6719 case AVX_256bit: 6720 if ((dst_enc >= 16) | (nds_enc >= 16) | (src_enc >= 16)) { 6721 // up propagate arithmetic instructions to meet RA requirements 6722 attributes->set_vector_len(AVX_512bit); 6723 } else { 6724 attributes->set_is_legacy_mode(); 6725 } 6726 break; 6727 } 6728 } 6729 6730 if ((UseAVX > 2) && !attributes->is_legacy_mode()) 6731 { 6732 bool evex_r = (dst_enc >= 16); 6733 bool evex_v = (nds_enc >= 16); 6734 // can use vex_x as bank extender on rm encoding 6735 vex_x = (src_enc >= 16); 6736 attributes->set_is_evex_instruction(); 6737 evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc); 6738 } else { 6739 vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); 6740 } 6741 6742 // return modrm byte components for operands 6743 return (((dst_enc & 7) << 3) | (src_enc & 7)); 6744 } 6745 6746 6747 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, 6748 VexOpcode opc, InstructionAttr *attributes) { 6749 if (UseAVX > 0) { 6750 int xreg_enc = xreg->encoding(); 6751 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 6752 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes); 6753 } else { 6754 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding"); 6755 rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w()); 6756 } 6757 } 6758 6759 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, 6760 VexOpcode opc, InstructionAttr *attributes) { 6761 int dst_enc = dst->encoding(); 6762 int src_enc = src->encoding(); 6763 if (UseAVX > 0) { 6764 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 6765 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes); 6766 } else { 6767 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding"); 6768 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w()); 6769 } 6770 } 6771 6772 void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { 6773 assert(VM_Version::supports_avx(), ""); 6774 assert(!VM_Version::supports_evex(), ""); 6775 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6776 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 6777 emit_int8((unsigned char)0xC2); 6778 emit_int8((unsigned char)(0xC0 | encode)); 6779 emit_int8((unsigned char)(0xF & cop)); 6780 } 6781 6782 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { 6783 assert(VM_Version::supports_avx(), ""); 6784 assert(!VM_Version::supports_evex(), ""); 6785 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 6786 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 6787 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 6788 emit_int8((unsigned char)0x4B); 6789 emit_int8((unsigned char)(0xC0 | encode)); 6790 int src2_enc = src2->encoding(); 6791 emit_int8((unsigned char)(0xF0 & src2_enc << 4)); 6792 } 6793 6794 6795 #ifndef _LP64 6796 6797 void Assembler::incl(Register dst) { 6798 // Don't use it directly. Use MacroAssembler::incrementl() instead. 6799 emit_int8(0x40 | dst->encoding()); 6800 } 6801 6802 void Assembler::lea(Register dst, Address src) { 6803 leal(dst, src); 6804 } 6805 6806 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) { 6807 InstructionMark im(this); 6808 emit_int8((unsigned char)0xC7); 6809 emit_operand(rax, dst); 6810 emit_data((int)imm32, rspec, 0); 6811 } 6812 6813 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) { 6814 InstructionMark im(this); 6815 int encode = prefix_and_encode(dst->encoding()); 6816 emit_int8((unsigned char)(0xB8 | encode)); 6817 emit_data((int)imm32, rspec, 0); 6818 } 6819 6820 void Assembler::popa() { // 32bit 6821 emit_int8(0x61); 6822 } 6823 6824 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) { 6825 InstructionMark im(this); 6826 emit_int8(0x68); 6827 emit_data(imm32, rspec, 0); 6828 } 6829 6830 void Assembler::pusha() { // 32bit 6831 emit_int8(0x60); 6832 } 6833 6834 void Assembler::set_byte_if_not_zero(Register dst) { 6835 emit_int8(0x0F); 6836 emit_int8((unsigned char)0x95); 6837 emit_int8((unsigned char)(0xE0 | dst->encoding())); 6838 } 6839 6840 void Assembler::shldl(Register dst, Register src) { 6841 emit_int8(0x0F); 6842 emit_int8((unsigned char)0xA5); 6843 emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); 6844 } 6845 6846 // 0F A4 / r ib 6847 void Assembler::shldl(Register dst, Register src, int8_t imm8) { 6848 emit_int8(0x0F); 6849 emit_int8((unsigned char)0xA4); 6850 emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); 6851 emit_int8(imm8); 6852 } 6853 6854 void Assembler::shrdl(Register dst, Register src) { 6855 emit_int8(0x0F); 6856 emit_int8((unsigned char)0xAD); 6857 emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); 6858 } 6859 6860 #else // LP64 6861 6862 void Assembler::set_byte_if_not_zero(Register dst) { 6863 int enc = prefix_and_encode(dst->encoding(), true); 6864 emit_int8(0x0F); 6865 emit_int8((unsigned char)0x95); 6866 emit_int8((unsigned char)(0xE0 | enc)); 6867 } 6868 6869 // 64bit only pieces of the assembler 6870 // This should only be used by 64bit instructions that can use rip-relative addressing; 6871 // it cannot be used by instructions that want an immediate value. 6872 6873 bool Assembler::reachable(AddressLiteral adr) { 6874 int64_t disp; 6875 // A reloc of type none will force a 64bit literal to the code stream. Likely a placeholder 6876 // for something that will be patched later and we need to be certain it will 6877 // always be reachable. 6878 if (adr.reloc() == relocInfo::none) { 6879 return false; 6880 } 6881 if (adr.reloc() == relocInfo::internal_word_type) { 6882 // This should be rip relative and easily reachable.
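// (a rip-relative displacement is a signed 32-bit offset, and an internal word
// lives in the same blob as the instruction that refers to it, so it should
// always be in range)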
6883 return true; 6884 } 6885 if (adr.reloc() == relocInfo::virtual_call_type || 6886 adr.reloc() == relocInfo::opt_virtual_call_type || 6887 adr.reloc() == relocInfo::static_call_type || 6888 adr.reloc() == relocInfo::static_stub_type ) { 6889 // This should be rip relative within the code cache and easily 6890 // reachable until we get huge code caches. (At which point 6891 // ic code is going to have issues). 6892 return true; 6893 } 6894 if (adr.reloc() != relocInfo::external_word_type && 6895 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special 6896 adr.reloc() != relocInfo::poll_type && // relocs to identify them 6897 adr.reloc() != relocInfo::runtime_call_type ) { 6898 return false; 6899 } 6900 6901 // Stress the correction code 6902 if (ForceUnreachable) { 6903 // Must be a runtime call reloc; see if it is in the codecache. 6904 // Flipping stuff in the codecache to be unreachable causes issues 6905 // with things like inline caches where the additional instructions 6906 // are not handled. 6907 if (CodeCache::find_blob(adr._target) == NULL) { 6908 return false; 6909 } 6910 } 6911 // For external_word_type/runtime_call_type: if the target is reachable both from where we 6912 // are now (possibly a temp buffer) and from anywhere we might end up 6913 // in the codeCache, then we are always reachable. 6914 // This would have to become more pessimistic if we ever 6915 // save/restore shared code. 6916 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); 6917 if (!is_simm32(disp)) return false; 6918 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); 6919 if (!is_simm32(disp)) return false; 6920 6921 disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int)); 6922 6923 // Because rip relative is a disp + address_of_next_instruction and we 6924 // don't know the value of address_of_next_instruction we apply a fudge factor 6925 // to make sure we will be ok no matter the size of the instruction we get placed into. 6926 // We don't have to fudge the checks above here because they are already worst case. 6927 6928 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, a 4-byte literal 6929 // + 4 because better safe than sorry. 6930 const int fudge = 12 + 4; 6931 if (disp < 0) { 6932 disp -= fudge; 6933 } else { 6934 disp += fudge; 6935 } 6936 return is_simm32(disp); 6937 } 6938 6939 // Check if the polling page is not reachable from the code cache using rip-relative 6940 // addressing. 6941 bool Assembler::is_polling_page_far() { 6942 intptr_t addr = (intptr_t)os::get_polling_page(); 6943 return ForceUnreachable || 6944 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) || 6945 !is_simm32(addr - (intptr_t)CodeCache::high_bound()); 6946 } 6947 6948 void Assembler::emit_data64(jlong data, 6949 relocInfo::relocType rtype, 6950 int format) { 6951 if (rtype == relocInfo::none) { 6952 emit_int64(data); 6953 } else { 6954 emit_data64(data, Relocation::spec_simple(rtype), format); 6955 } 6956 } 6957 6958 void Assembler::emit_data64(jlong data, 6959 RelocationHolder const& rspec, 6960 int format) { 6961 assert(imm_operand == 0, "default format must be immediate in this file"); 6962 assert(imm_operand == format, "must be immediate"); 6963 assert(inst_mark() != NULL, "must be inside InstructionMark"); 6964 // Do not use AbstractAssembler::relocate, which is not intended for 6965 // embedded words. Instead, relocate to the enclosing instruction.
6966 code_section()->relocate(inst_mark(), rspec, format); 6967 #ifdef ASSERT 6968 check_relocation(rspec, format); 6969 #endif 6970 emit_int64(data); 6971 } 6972 6973 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { 6974 if (reg_enc >= 8) { 6975 prefix(REX_B); 6976 reg_enc -= 8; 6977 } else if (byteinst && reg_enc >= 4) { 6978 prefix(REX); 6979 } 6980 return reg_enc; 6981 } 6982 6983 int Assembler::prefixq_and_encode(int reg_enc) { 6984 if (reg_enc < 8) { 6985 prefix(REX_W); 6986 } else { 6987 prefix(REX_WB); 6988 reg_enc -= 8; 6989 } 6990 return reg_enc; 6991 } 6992 6993 int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) { 6994 if (dst_enc < 8) { 6995 if (src_enc >= 8) { 6996 prefix(REX_B); 6997 src_enc -= 8; 6998 } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) { 6999 prefix(REX); 7000 } 7001 } else { 7002 if (src_enc < 8) { 7003 prefix(REX_R); 7004 } else { 7005 prefix(REX_RB); 7006 src_enc -= 8; 7007 } 7008 dst_enc -= 8; 7009 } 7010 return dst_enc << 3 | src_enc; 7011 } 7012 7013 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { 7014 if (dst_enc < 8) { 7015 if (src_enc < 8) { 7016 prefix(REX_W); 7017 } else { 7018 prefix(REX_WB); 7019 src_enc -= 8; 7020 } 7021 } else { 7022 if (src_enc < 8) { 7023 prefix(REX_WR); 7024 } else { 7025 prefix(REX_WRB); 7026 src_enc -= 8; 7027 } 7028 dst_enc -= 8; 7029 } 7030 return dst_enc << 3 | src_enc; 7031 } 7032 7033 void Assembler::prefix(Register reg) { 7034 if (reg->encoding() >= 8) { 7035 prefix(REX_B); 7036 } 7037 } 7038 7039 void Assembler::prefix(Register dst, Register src, Prefix p) { 7040 if (src->encoding() >= 8) { 7041 p = (Prefix)(p | REX_B); 7042 } 7043 if (dst->encoding() >= 8) { 7044 p = (Prefix)( p | REX_R); 7045 } 7046 if (p != Prefix_EMPTY) { 7047 // do not generate an empty prefix 7048 prefix(p); 7049 } 7050 } 7051 7052 void Assembler::prefix(Register dst, Address adr, Prefix p) { 7053 if (adr.base_needs_rex()) { 7054 if (adr.index_needs_rex()) { 7055 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X"); 7056 } else { 7057 prefix(REX_B); 7058 } 7059 } else { 7060 if (adr.index_needs_rex()) { 7061 assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X"); 7062 } 7063 } 7064 if (dst->encoding() >= 8) { 7065 p = (Prefix)(p | REX_R); 7066 } 7067 if (p != Prefix_EMPTY) { 7068 // do not generate an empty prefix 7069 prefix(p); 7070 } 7071 } 7072 7073 void Assembler::prefix(Address adr) { 7074 if (adr.base_needs_rex()) { 7075 if (adr.index_needs_rex()) { 7076 prefix(REX_XB); 7077 } else { 7078 prefix(REX_B); 7079 } 7080 } else { 7081 if (adr.index_needs_rex()) { 7082 prefix(REX_X); 7083 } 7084 } 7085 } 7086 7087 void Assembler::prefixq(Address adr) { 7088 if (adr.base_needs_rex()) { 7089 if (adr.index_needs_rex()) { 7090 prefix(REX_WXB); 7091 } else { 7092 prefix(REX_WB); 7093 } 7094 } else { 7095 if (adr.index_needs_rex()) { 7096 prefix(REX_WX); 7097 } else { 7098 prefix(REX_W); 7099 } 7100 } 7101 } 7102 7103 7104 void Assembler::prefix(Address adr, Register reg, bool byteinst) { 7105 if (reg->encoding() < 8) { 7106 if (adr.base_needs_rex()) { 7107 if (adr.index_needs_rex()) { 7108 prefix(REX_XB); 7109 } else { 7110 prefix(REX_B); 7111 } 7112 } else { 7113 if (adr.index_needs_rex()) { 7114 prefix(REX_X); 7115 } else if (byteinst && reg->encoding() >= 4 ) { 7116 prefix(REX); 7117 } 7118 } 7119 } else { 7120 if (adr.base_needs_rex()) { 7121 if (adr.index_needs_rex()) 
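// reg, index and base all need extension bits, so fold REX.R, REX.X and REX.B into a single prefix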
{ 7122 prefix(REX_RXB); 7123 } else { 7124 prefix(REX_RB); 7125 } 7126 } else { 7127 if (adr.index_needs_rex()) { 7128 prefix(REX_RX); 7129 } else { 7130 prefix(REX_R); 7131 } 7132 } 7133 } 7134 } 7135 7136 void Assembler::prefixq(Address adr, Register src) { 7137 if (src->encoding() < 8) { 7138 if (adr.base_needs_rex()) { 7139 if (adr.index_needs_rex()) { 7140 prefix(REX_WXB); 7141 } else { 7142 prefix(REX_WB); 7143 } 7144 } else { 7145 if (adr.index_needs_rex()) { 7146 prefix(REX_WX); 7147 } else { 7148 prefix(REX_W); 7149 } 7150 } 7151 } else { 7152 if (adr.base_needs_rex()) { 7153 if (adr.index_needs_rex()) { 7154 prefix(REX_WRXB); 7155 } else { 7156 prefix(REX_WRB); 7157 } 7158 } else { 7159 if (adr.index_needs_rex()) { 7160 prefix(REX_WRX); 7161 } else { 7162 prefix(REX_WR); 7163 } 7164 } 7165 } 7166 } 7167 7168 void Assembler::prefix(Address adr, XMMRegister reg) { 7169 if (reg->encoding() < 8) { 7170 if (adr.base_needs_rex()) { 7171 if (adr.index_needs_rex()) { 7172 prefix(REX_XB); 7173 } else { 7174 prefix(REX_B); 7175 } 7176 } else { 7177 if (adr.index_needs_rex()) { 7178 prefix(REX_X); 7179 } 7180 } 7181 } else { 7182 if (adr.base_needs_rex()) { 7183 if (adr.index_needs_rex()) { 7184 prefix(REX_RXB); 7185 } else { 7186 prefix(REX_RB); 7187 } 7188 } else { 7189 if (adr.index_needs_rex()) { 7190 prefix(REX_RX); 7191 } else { 7192 prefix(REX_R); 7193 } 7194 } 7195 } 7196 } 7197 7198 void Assembler::prefixq(Address adr, XMMRegister src) { 7199 if (src->encoding() < 8) { 7200 if (adr.base_needs_rex()) { 7201 if (adr.index_needs_rex()) { 7202 prefix(REX_WXB); 7203 } else { 7204 prefix(REX_WB); 7205 } 7206 } else { 7207 if (adr.index_needs_rex()) { 7208 prefix(REX_WX); 7209 } else { 7210 prefix(REX_W); 7211 } 7212 } 7213 } else { 7214 if (adr.base_needs_rex()) { 7215 if (adr.index_needs_rex()) { 7216 prefix(REX_WRXB); 7217 } else { 7218 prefix(REX_WRB); 7219 } 7220 } else { 7221 if (adr.index_needs_rex()) { 7222 prefix(REX_WRX); 7223 } else { 7224 prefix(REX_WR); 7225 } 7226 } 7227 } 7228 } 7229 7230 void Assembler::adcq(Register dst, int32_t imm32) { 7231 (void) prefixq_and_encode(dst->encoding()); 7232 emit_arith(0x81, 0xD0, dst, imm32); 7233 } 7234 7235 void Assembler::adcq(Register dst, Address src) { 7236 InstructionMark im(this); 7237 prefixq(src, dst); 7238 emit_int8(0x13); 7239 emit_operand(dst, src); 7240 } 7241 7242 void Assembler::adcq(Register dst, Register src) { 7243 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 7244 emit_arith(0x13, 0xC0, dst, src); 7245 } 7246 7247 void Assembler::addq(Address dst, int32_t imm32) { 7248 InstructionMark im(this); 7249 prefixq(dst); 7250 emit_arith_operand(0x81, rax, dst,imm32); 7251 } 7252 7253 void Assembler::addq(Address dst, Register src) { 7254 InstructionMark im(this); 7255 prefixq(dst, src); 7256 emit_int8(0x01); 7257 emit_operand(src, dst); 7258 } 7259 7260 void Assembler::addq(Register dst, int32_t imm32) { 7261 (void) prefixq_and_encode(dst->encoding()); 7262 emit_arith(0x81, 0xC0, dst, imm32); 7263 } 7264 7265 void Assembler::addq(Register dst, Address src) { 7266 InstructionMark im(this); 7267 prefixq(src, dst); 7268 emit_int8(0x03); 7269 emit_operand(dst, src); 7270 } 7271 7272 void Assembler::addq(Register dst, Register src) { 7273 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 7274 emit_arith(0x03, 0xC0, dst, src); 7275 } 7276 7277 void Assembler::adcxq(Register dst, Register src) { 7278 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 7279 emit_int8((unsigned char)0x66); 
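// ADCX r64, r/m64 encodes as 66 REX.W 0F 38 F6 /r; the mandatory 0x66 prefix
// above must precede the REX.W prefix produced by prefixq_and_encode below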
7280 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7281 emit_int8(0x0F); 7282 emit_int8(0x38); 7283 emit_int8((unsigned char)0xF6); 7284 emit_int8((unsigned char)(0xC0 | encode)); 7285 } 7286 7287 void Assembler::adoxq(Register dst, Register src) { 7288 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 7289 emit_int8((unsigned char)0xF3); 7290 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7291 emit_int8(0x0F); 7292 emit_int8(0x38); 7293 emit_int8((unsigned char)0xF6); 7294 emit_int8((unsigned char)(0xC0 | encode)); 7295 } 7296 7297 void Assembler::andq(Address dst, int32_t imm32) { 7298 InstructionMark im(this); 7299 prefixq(dst); 7300 emit_int8((unsigned char)0x81); 7301 emit_operand(rsp, dst, 4); 7302 emit_int32(imm32); 7303 } 7304 7305 void Assembler::andq(Register dst, int32_t imm32) { 7306 (void) prefixq_and_encode(dst->encoding()); 7307 emit_arith(0x81, 0xE0, dst, imm32); 7308 } 7309 7310 void Assembler::andq(Register dst, Address src) { 7311 InstructionMark im(this); 7312 prefixq(src, dst); 7313 emit_int8(0x23); 7314 emit_operand(dst, src); 7315 } 7316 7317 void Assembler::andq(Register dst, Register src) { 7318 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 7319 emit_arith(0x23, 0xC0, dst, src); 7320 } 7321 7322 void Assembler::andnq(Register dst, Register src1, Register src2) { 7323 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7324 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7325 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7326 emit_int8((unsigned char)0xF2); 7327 emit_int8((unsigned char)(0xC0 | encode)); 7328 } 7329 7330 void Assembler::andnq(Register dst, Register src1, Address src2) { 7331 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7332 InstructionMark im(this); 7333 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7334 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7335 emit_int8((unsigned char)0xF2); 7336 emit_operand(dst, src2); 7337 } 7338 7339 void Assembler::bsfq(Register dst, Register src) { 7340 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7341 emit_int8(0x0F); 7342 emit_int8((unsigned char)0xBC); 7343 emit_int8((unsigned char)(0xC0 | encode)); 7344 } 7345 7346 void Assembler::bsrq(Register dst, Register src) { 7347 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7348 emit_int8(0x0F); 7349 emit_int8((unsigned char)0xBD); 7350 emit_int8((unsigned char)(0xC0 | encode)); 7351 } 7352 7353 void Assembler::bswapq(Register reg) { 7354 int encode = prefixq_and_encode(reg->encoding()); 7355 emit_int8(0x0F); 7356 emit_int8((unsigned char)(0xC8 | encode)); 7357 } 7358 7359 void Assembler::blsiq(Register dst, Register src) { 7360 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7361 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7362 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7363 emit_int8((unsigned char)0xF3); 7364 emit_int8((unsigned char)(0xC0 | encode)); 7365 } 7366 7367 void 
Assembler::blsiq(Register dst, Address src) { 7368 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7369 InstructionMark im(this); 7370 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7371 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7372 emit_int8((unsigned char)0xF3); 7373 emit_operand(rbx, src); 7374 } 7375 7376 void Assembler::blsmskq(Register dst, Register src) { 7377 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7378 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7379 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7380 emit_int8((unsigned char)0xF3); 7381 emit_int8((unsigned char)(0xC0 | encode)); 7382 } 7383 7384 void Assembler::blsmskq(Register dst, Address src) { 7385 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7386 InstructionMark im(this); 7387 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7388 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7389 emit_int8((unsigned char)0xF3); 7390 emit_operand(rdx, src); 7391 } 7392 7393 void Assembler::blsrq(Register dst, Register src) { 7394 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7395 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7396 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7397 emit_int8((unsigned char)0xF3); 7398 emit_int8((unsigned char)(0xC0 | encode)); 7399 } 7400 7401 void Assembler::blsrq(Register dst, Address src) { 7402 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 7403 InstructionMark im(this); 7404 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7405 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); 7406 emit_int8((unsigned char)0xF3); 7407 emit_operand(rcx, src); 7408 } 7409 7410 void Assembler::cdqq() { 7411 prefix(REX_W); 7412 emit_int8((unsigned char)0x99); 7413 } 7414 7415 void Assembler::clflush(Address adr) { 7416 prefix(adr); 7417 emit_int8(0x0F); 7418 emit_int8((unsigned char)0xAE); 7419 emit_operand(rdi, adr); 7420 } 7421 7422 void Assembler::cmovq(Condition cc, Register dst, Register src) { 7423 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7424 emit_int8(0x0F); 7425 emit_int8(0x40 | cc); 7426 emit_int8((unsigned char)(0xC0 | encode)); 7427 } 7428 7429 void Assembler::cmovq(Condition cc, Register dst, Address src) { 7430 InstructionMark im(this); 7431 prefixq(src, dst); 7432 emit_int8(0x0F); 7433 emit_int8(0x40 | cc); 7434 emit_operand(dst, src); 7435 } 7436 7437 void Assembler::cmpq(Address dst, int32_t imm32) { 7438 InstructionMark im(this); 7439 prefixq(dst); 7440 emit_int8((unsigned char)0x81); 7441 emit_operand(rdi, dst, 4); 7442 emit_int32(imm32); 7443 } 7444 7445 void Assembler::cmpq(Register dst, int32_t imm32) { 7446 (void) prefixq_and_encode(dst->encoding()); 7447 
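// 81 /7 id: CMP r/m64, imm32; 0xF8 (= 0xC0 | 7 << 3) selects the /7 opcode extension in emit_arith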
emit_arith(0x81, 0xF8, dst, imm32); 7448 } 7449 7450 void Assembler::cmpq(Address dst, Register src) { 7451 InstructionMark im(this); 7452 prefixq(dst, src); 7453 emit_int8(0x39); // 39 /r: CMP r/m64, r64 (0x3B would encode CMP r64, r/m64, swapping the operands) 7454 emit_operand(src, dst); 7455 } 7456 7457 void Assembler::cmpq(Register dst, Register src) { 7458 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 7459 emit_arith(0x3B, 0xC0, dst, src); 7460 } 7461 7462 void Assembler::cmpq(Register dst, Address src) { 7463 InstructionMark im(this); 7464 prefixq(src, dst); 7465 emit_int8(0x3B); 7466 emit_operand(dst, src); 7467 } 7468 7469 void Assembler::cmpxchgq(Register reg, Address adr) { 7470 InstructionMark im(this); 7471 prefixq(adr, reg); 7472 emit_int8(0x0F); 7473 emit_int8((unsigned char)0xB1); 7474 emit_operand(reg, adr); 7475 } 7476 7477 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { 7478 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7479 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7480 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 7481 emit_int8(0x2A); 7482 emit_int8((unsigned char)(0xC0 | encode)); 7483 } 7484 7485 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { 7486 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7487 InstructionMark im(this); 7488 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7489 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 7490 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 7491 emit_int8(0x2A); 7492 emit_operand(dst, src); 7493 } 7494 7495 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) { 7496 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7497 InstructionMark im(this); 7498 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7499 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 7500 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 7501 emit_int8(0x2A); 7502 emit_operand(dst, src); 7503 } 7504 7505 void Assembler::cvttsd2siq(Register dst, XMMRegister src) { 7506 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7507 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7508 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 7509 emit_int8(0x2C); 7510 emit_int8((unsigned char)(0xC0 | encode)); 7511 } 7512 7513 void Assembler::cvttss2siq(Register dst, XMMRegister src) { 7514 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7515 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7516 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 7517 emit_int8(0x2C); 7518 emit_int8((unsigned char)(0xC0 | encode)); 7519 } 7520 7521 void Assembler::decl(Register dst) { 7522 // Don't use it directly. Use MacroAssembler::decrementl() instead.
7523 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 7524 int encode = prefix_and_encode(dst->encoding()); 7525 emit_int8((unsigned char)0xFF); 7526 emit_int8((unsigned char)(0xC8 | encode)); 7527 } 7528 7529 void Assembler::decq(Register dst) { 7530 // Don't use it directly. Use MacroAssembler::decrementq() instead. 7531 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 7532 int encode = prefixq_and_encode(dst->encoding()); 7533 emit_int8((unsigned char)0xFF); 7534 emit_int8((unsigned char)(0xC8 | encode)); 7535 } 7536 7537 void Assembler::decq(Address dst) { 7538 // Don't use it directly. Use MacroAssembler::decrementq() instead. 7539 InstructionMark im(this); 7540 prefixq(dst); 7541 emit_int8((unsigned char)0xFF); 7542 emit_operand(rcx, dst); 7543 } 7544 7545 void Assembler::fxrstor(Address src) { 7546 prefixq(src); 7547 emit_int8(0x0F); 7548 emit_int8((unsigned char)0xAE); 7549 emit_operand(as_Register(1), src); 7550 } 7551 7552 void Assembler::xrstor(Address src) { 7553 prefixq(src); 7554 emit_int8(0x0F); 7555 emit_int8((unsigned char)0xAE); 7556 emit_operand(as_Register(5), src); 7557 } 7558 7559 void Assembler::fxsave(Address dst) { 7560 prefixq(dst); 7561 emit_int8(0x0F); 7562 emit_int8((unsigned char)0xAE); 7563 emit_operand(as_Register(0), dst); 7564 } 7565 7566 void Assembler::xsave(Address dst) { 7567 prefixq(dst); 7568 emit_int8(0x0F); 7569 emit_int8((unsigned char)0xAE); 7570 emit_operand(as_Register(4), dst); 7571 } 7572 7573 void Assembler::idivq(Register src) { 7574 int encode = prefixq_and_encode(src->encoding()); 7575 emit_int8((unsigned char)0xF7); 7576 emit_int8((unsigned char)(0xF8 | encode)); 7577 } 7578 7579 void Assembler::imulq(Register dst, Register src) { 7580 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7581 emit_int8(0x0F); 7582 emit_int8((unsigned char)0xAF); 7583 emit_int8((unsigned char)(0xC0 | encode)); 7584 } 7585 7586 void Assembler::imulq(Register dst, Register src, int value) { 7587 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7588 if (is8bit(value)) { 7589 emit_int8(0x6B); 7590 emit_int8((unsigned char)(0xC0 | encode)); 7591 emit_int8(value & 0xFF); 7592 } else { 7593 emit_int8(0x69); 7594 emit_int8((unsigned char)(0xC0 | encode)); 7595 emit_int32(value); 7596 } 7597 } 7598 7599 void Assembler::imulq(Register dst, Address src) { 7600 InstructionMark im(this); 7601 prefixq(src, dst); 7602 emit_int8(0x0F); 7603 emit_int8((unsigned char)0xAF); 7604 emit_operand(dst, src); 7605 } 7606 7607 void Assembler::incl(Register dst) { 7608 // Don't use it directly. Use MacroAssembler::incrementl() instead. 7609 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 7610 int encode = prefix_and_encode(dst->encoding()); 7611 emit_int8((unsigned char)0xFF); 7612 emit_int8((unsigned char)(0xC0 | encode)); 7613 } 7614 7615 void Assembler::incq(Register dst) { 7616 // Don't use it directly. Use MacroAssembler::incrementq() instead. 7617 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 7618 int encode = prefixq_and_encode(dst->encoding()); 7619 emit_int8((unsigned char)0xFF); 7620 emit_int8((unsigned char)(0xC0 | encode)); 7621 } 7622 7623 void Assembler::incq(Address dst) { 7624 // Don't use it directly. Use MacroAssembler::incrementq() instead.
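// FF /0: INC r/m64; rax supplies the /0 opcode extension to emit_operand below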
7625 InstructionMark im(this); 7626 prefixq(dst); 7627 emit_int8((unsigned char)0xFF); 7628 emit_operand(rax, dst); 7629 } 7630 7631 void Assembler::lea(Register dst, Address src) { 7632 leaq(dst, src); 7633 } 7634 7635 void Assembler::leaq(Register dst, Address src) { 7636 InstructionMark im(this); 7637 prefixq(src, dst); 7638 emit_int8((unsigned char)0x8D); 7639 emit_operand(dst, src); 7640 } 7641 7642 void Assembler::mov64(Register dst, int64_t imm64) { 7643 InstructionMark im(this); 7644 int encode = prefixq_and_encode(dst->encoding()); 7645 emit_int8((unsigned char)(0xB8 | encode)); 7646 emit_int64(imm64); 7647 } 7648 7649 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { 7650 InstructionMark im(this); 7651 int encode = prefixq_and_encode(dst->encoding()); 7652 emit_int8(0xB8 | encode); 7653 emit_data64(imm64, rspec); 7654 } 7655 7656 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) { 7657 InstructionMark im(this); 7658 int encode = prefix_and_encode(dst->encoding()); 7659 emit_int8((unsigned char)(0xB8 | encode)); 7660 emit_data((int)imm32, rspec, narrow_oop_operand); 7661 } 7662 7663 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) { 7664 InstructionMark im(this); 7665 prefix(dst); 7666 emit_int8((unsigned char)0xC7); 7667 emit_operand(rax, dst, 4); 7668 emit_data((int)imm32, rspec, narrow_oop_operand); 7669 } 7670 7671 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { 7672 InstructionMark im(this); 7673 int encode = prefix_and_encode(src1->encoding()); 7674 emit_int8((unsigned char)0x81); 7675 emit_int8((unsigned char)(0xF8 | encode)); 7676 emit_data((int)imm32, rspec, narrow_oop_operand); 7677 } 7678 7679 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { 7680 InstructionMark im(this); 7681 prefix(src1); 7682 emit_int8((unsigned char)0x81); 7683 emit_operand(rax, src1, 4); 7684 emit_data((int)imm32, rspec, narrow_oop_operand); 7685 } 7686 7687 void Assembler::lzcntq(Register dst, Register src) { 7688 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 7689 emit_int8((unsigned char)0xF3); 7690 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7691 emit_int8(0x0F); 7692 emit_int8((unsigned char)0xBD); 7693 emit_int8((unsigned char)(0xC0 | encode)); 7694 } 7695 7696 void Assembler::movdq(XMMRegister dst, Register src) { 7697 // table D-1 says MMX/SSE2 7698 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7699 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7700 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7701 emit_int8(0x6E); 7702 emit_int8((unsigned char)(0xC0 | encode)); 7703 } 7704 7705 void Assembler::movdq(Register dst, XMMRegister src) { 7706 // table D-1 says MMX/SSE2 7707 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7708 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 7709 // swap src/dst to get correct prefix 7710 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 7711 emit_int8(0x7E); 7712 emit_int8((unsigned char)(0xC0 | encode)); 7713 } 7714 7715 void Assembler::movq(Register dst, Register src) { 7716 int encode = 
prefixq_and_encode(dst->encoding(), src->encoding()); 7717 emit_int8((unsigned char)0x8B); 7718 emit_int8((unsigned char)(0xC0 | encode)); 7719 } 7720 7721 void Assembler::movq(Register dst, Address src) { 7722 InstructionMark im(this); 7723 prefixq(src, dst); 7724 emit_int8((unsigned char)0x8B); 7725 emit_operand(dst, src); 7726 } 7727 7728 void Assembler::movq(Address dst, Register src) { 7729 InstructionMark im(this); 7730 prefixq(dst, src); 7731 emit_int8((unsigned char)0x89); 7732 emit_operand(src, dst); 7733 } 7734 7735 void Assembler::movsbq(Register dst, Address src) { 7736 InstructionMark im(this); 7737 prefixq(src, dst); 7738 emit_int8(0x0F); 7739 emit_int8((unsigned char)0xBE); 7740 emit_operand(dst, src); 7741 } 7742 7743 void Assembler::movsbq(Register dst, Register src) { 7744 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7745 emit_int8(0x0F); 7746 emit_int8((unsigned char)0xBE); 7747 emit_int8((unsigned char)(0xC0 | encode)); 7748 } 7749 7750 void Assembler::movslq(Register dst, int32_t imm32) { 7751 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) 7752 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) 7753 // as a result we shouldn't use until tested at runtime... 7754 ShouldNotReachHere(); 7755 InstructionMark im(this); 7756 int encode = prefixq_and_encode(dst->encoding()); 7757 emit_int8((unsigned char)(0xC7 | encode)); 7758 emit_int32(imm32); 7759 } 7760 7761 void Assembler::movslq(Address dst, int32_t imm32) { 7762 assert(is_simm32(imm32), "lost bits"); 7763 InstructionMark im(this); 7764 prefixq(dst); 7765 emit_int8((unsigned char)0xC7); 7766 emit_operand(rax, dst, 4); 7767 emit_int32(imm32); 7768 } 7769 7770 void Assembler::movslq(Register dst, Address src) { 7771 InstructionMark im(this); 7772 prefixq(src, dst); 7773 emit_int8(0x63); 7774 emit_operand(dst, src); 7775 } 7776 7777 void Assembler::movslq(Register dst, Register src) { 7778 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7779 emit_int8(0x63); 7780 emit_int8((unsigned char)(0xC0 | encode)); 7781 } 7782 7783 void Assembler::movswq(Register dst, Address src) { 7784 InstructionMark im(this); 7785 prefixq(src, dst); 7786 emit_int8(0x0F); 7787 emit_int8((unsigned char)0xBF); 7788 emit_operand(dst, src); 7789 } 7790 7791 void Assembler::movswq(Register dst, Register src) { 7792 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7793 emit_int8((unsigned char)0x0F); 7794 emit_int8((unsigned char)0xBF); 7795 emit_int8((unsigned char)(0xC0 | encode)); 7796 } 7797 7798 void Assembler::movzbq(Register dst, Address src) { 7799 InstructionMark im(this); 7800 prefixq(src, dst); 7801 emit_int8((unsigned char)0x0F); 7802 emit_int8((unsigned char)0xB6); 7803 emit_operand(dst, src); 7804 } 7805 7806 void Assembler::movzbq(Register dst, Register src) { 7807 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7808 emit_int8(0x0F); 7809 emit_int8((unsigned char)0xB6); 7810 emit_int8(0xC0 | encode); 7811 } 7812 7813 void Assembler::movzwq(Register dst, Address src) { 7814 InstructionMark im(this); 7815 prefixq(src, dst); 7816 emit_int8((unsigned char)0x0F); 7817 emit_int8((unsigned char)0xB7); 7818 emit_operand(dst, src); 7819 } 7820 7821 void Assembler::movzwq(Register dst, Register src) { 7822 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7823 emit_int8((unsigned char)0x0F); 7824 emit_int8((unsigned char)0xB7); 7825 emit_int8((unsigned char)(0xC0 | encode)); 7826 } 7827 7828 void Assembler::mulq(Address src) { 7829 
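// F7 /4: MUL r/m64 (unsigned RDX:RAX = RAX * r/m64); rsp supplies the /4 opcode extension to emit_operand below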
InstructionMark im(this); 7830 prefixq(src); 7831 emit_int8((unsigned char)0xF7); 7832 emit_operand(rsp, src); 7833 } 7834 7835 void Assembler::mulq(Register src) { 7836 int encode = prefixq_and_encode(src->encoding()); 7837 emit_int8((unsigned char)0xF7); 7838 emit_int8((unsigned char)(0xE0 | encode)); 7839 } 7840 7841 void Assembler::mulxq(Register dst1, Register dst2, Register src) { 7842 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 7843 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 7844 int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); 7845 emit_int8((unsigned char)0xF6); 7846 emit_int8((unsigned char)(0xC0 | encode)); 7847 } 7848 7849 void Assembler::negq(Register dst) { 7850 int encode = prefixq_and_encode(dst->encoding()); 7851 emit_int8((unsigned char)0xF7); 7852 emit_int8((unsigned char)(0xD8 | encode)); 7853 } 7854 7855 void Assembler::notq(Register dst) { 7856 int encode = prefixq_and_encode(dst->encoding()); 7857 emit_int8((unsigned char)0xF7); 7858 emit_int8((unsigned char)(0xD0 | encode)); 7859 } 7860 7861 void Assembler::orq(Address dst, int32_t imm32) { 7862 InstructionMark im(this); 7863 prefixq(dst); 7864 emit_int8((unsigned char)0x81); 7865 emit_operand(rcx, dst, 4); 7866 emit_int32(imm32); 7867 } 7868 7869 void Assembler::orq(Register dst, int32_t imm32) { 7870 (void) prefixq_and_encode(dst->encoding()); 7871 emit_arith(0x81, 0xC8, dst, imm32); 7872 } 7873 7874 void Assembler::orq(Register dst, Address src) { 7875 InstructionMark im(this); 7876 prefixq(src, dst); 7877 emit_int8(0x0B); 7878 emit_operand(dst, src); 7879 } 7880 7881 void Assembler::orq(Register dst, Register src) { 7882 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 7883 emit_arith(0x0B, 0xC0, dst, src); 7884 } 7885 7886 void Assembler::popa() { // 64bit 7887 movq(r15, Address(rsp, 0)); 7888 movq(r14, Address(rsp, wordSize)); 7889 movq(r13, Address(rsp, 2 * wordSize)); 7890 movq(r12, Address(rsp, 3 * wordSize)); 7891 movq(r11, Address(rsp, 4 * wordSize)); 7892 movq(r10, Address(rsp, 5 * wordSize)); 7893 movq(r9, Address(rsp, 6 * wordSize)); 7894 movq(r8, Address(rsp, 7 * wordSize)); 7895 movq(rdi, Address(rsp, 8 * wordSize)); 7896 movq(rsi, Address(rsp, 9 * wordSize)); 7897 movq(rbp, Address(rsp, 10 * wordSize)); 7898 // skip rsp 7899 movq(rbx, Address(rsp, 12 * wordSize)); 7900 movq(rdx, Address(rsp, 13 * wordSize)); 7901 movq(rcx, Address(rsp, 14 * wordSize)); 7902 movq(rax, Address(rsp, 15 * wordSize)); 7903 7904 addq(rsp, 16 * wordSize); 7905 } 7906 7907 void Assembler::popcntq(Register dst, Address src) { 7908 assert(VM_Version::supports_popcnt(), "must support"); 7909 InstructionMark im(this); 7910 emit_int8((unsigned char)0xF3); 7911 prefixq(src, dst); 7912 emit_int8((unsigned char)0x0F); 7913 emit_int8((unsigned char)0xB8); 7914 emit_operand(dst, src); 7915 } 7916 7917 void Assembler::popcntq(Register dst, Register src) { 7918 assert(VM_Version::supports_popcnt(), "must support"); 7919 emit_int8((unsigned char)0xF3); 7920 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7921 emit_int8((unsigned char)0x0F); 7922 emit_int8((unsigned char)0xB8); 7923 emit_int8((unsigned char)(0xC0 | encode)); 7924 } 7925 7926 void Assembler::popq(Address dst) { 7927 InstructionMark im(this); 7928 prefixq(dst); 7929 emit_int8((unsigned char)0x8F); 7930 emit_operand(rax, dst); 7931 } 7932 
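// Sketch of the frame pusha() builds and popa() consumes, as word offsets from
// rsp after the 16-word adjustment: 15:rax 14:rcx 13:rdx 12:rbx
// 11:original rsp (saved at -5*wordSize before the subq) 10:rbp 9:rsi 8:rdi
// 7:r8 6:r9 5:r10 4:r11 3:r12 2:r13 1:r14 0:r15.
// popa() deliberately skips slot 11 and restores rsp with its final addq.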
7933 void Assembler::pusha() { // 64bit 7934 // we have to store original rsp. ABI says that 128 bytes 7935 // below rsp are local scratch. 7936 movq(Address(rsp, -5 * wordSize), rsp); 7937 7938 subq(rsp, 16 * wordSize); 7939 7940 movq(Address(rsp, 15 * wordSize), rax); 7941 movq(Address(rsp, 14 * wordSize), rcx); 7942 movq(Address(rsp, 13 * wordSize), rdx); 7943 movq(Address(rsp, 12 * wordSize), rbx); 7944 // skip rsp 7945 movq(Address(rsp, 10 * wordSize), rbp); 7946 movq(Address(rsp, 9 * wordSize), rsi); 7947 movq(Address(rsp, 8 * wordSize), rdi); 7948 movq(Address(rsp, 7 * wordSize), r8); 7949 movq(Address(rsp, 6 * wordSize), r9); 7950 movq(Address(rsp, 5 * wordSize), r10); 7951 movq(Address(rsp, 4 * wordSize), r11); 7952 movq(Address(rsp, 3 * wordSize), r12); 7953 movq(Address(rsp, 2 * wordSize), r13); 7954 movq(Address(rsp, wordSize), r14); 7955 movq(Address(rsp, 0), r15); 7956 } 7957 7958 void Assembler::pushq(Address src) { 7959 InstructionMark im(this); 7960 prefixq(src); 7961 emit_int8((unsigned char)0xFF); 7962 emit_operand(rsi, src); 7963 } 7964 7965 void Assembler::rclq(Register dst, int imm8) { 7966 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 7967 int encode = prefixq_and_encode(dst->encoding()); 7968 if (imm8 == 1) { 7969 emit_int8((unsigned char)0xD1); 7970 emit_int8((unsigned char)(0xD0 | encode)); 7971 } else { 7972 emit_int8((unsigned char)0xC1); 7973 emit_int8((unsigned char)(0xD0 | encode)); 7974 emit_int8(imm8); 7975 } 7976 } 7977 7978 void Assembler::rcrq(Register dst, int imm8) { 7979 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 7980 int encode = prefixq_and_encode(dst->encoding()); 7981 if (imm8 == 1) { 7982 emit_int8((unsigned char)0xD1); 7983 emit_int8((unsigned char)(0xD8 | encode)); 7984 } else { 7985 emit_int8((unsigned char)0xC1); 7986 emit_int8((unsigned char)(0xD8 | encode)); 7987 emit_int8(imm8); 7988 } 7989 } 7990 7991 void Assembler::rorq(Register dst, int imm8) { 7992 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 7993 int encode = prefixq_and_encode(dst->encoding()); 7994 if (imm8 == 1) { 7995 emit_int8((unsigned char)0xD1); 7996 emit_int8((unsigned char)(0xC8 | encode)); 7997 } else { 7998 emit_int8((unsigned char)0xC1); 7999 emit_int8((unsigned char)(0xc8 | encode)); 8000 emit_int8(imm8); 8001 } 8002 } 8003 8004 void Assembler::rorxq(Register dst, Register src, int imm8) { 8005 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 8006 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); 8007 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); 8008 emit_int8((unsigned char)0xF0); 8009 emit_int8((unsigned char)(0xC0 | encode)); 8010 emit_int8(imm8); 8011 } 8012 8013 void Assembler::sarq(Register dst, int imm8) { 8014 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 8015 int encode = prefixq_and_encode(dst->encoding()); 8016 if (imm8 == 1) { 8017 emit_int8((unsigned char)0xD1); 8018 emit_int8((unsigned char)(0xF8 | encode)); 8019 } else { 8020 emit_int8((unsigned char)0xC1); 8021 emit_int8((unsigned char)(0xF8 | encode)); 8022 emit_int8(imm8); 8023 } 8024 } 8025 8026 void Assembler::sarq(Register dst) { 8027 int encode = prefixq_and_encode(dst->encoding()); 8028 emit_int8((unsigned char)0xD3); 8029 emit_int8((unsigned char)(0xF8 | encode)); 8030 } 8031 8032 void Assembler::sbbq(Address dst, int32_t imm32) { 8033 InstructionMark im(this); 8034 
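// 81 /3 id: SBB r/m64, imm32; rbx supplies the /3 opcode extension to emit_arith_operand below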
prefixq(dst); 8035 emit_arith_operand(0x81, rbx, dst, imm32); 8036 } 8037 8038 void Assembler::sbbq(Register dst, int32_t imm32) { 8039 (void) prefixq_and_encode(dst->encoding()); 8040 emit_arith(0x81, 0xD8, dst, imm32); 8041 } 8042 8043 void Assembler::sbbq(Register dst, Address src) { 8044 InstructionMark im(this); 8045 prefixq(src, dst); 8046 emit_int8(0x1B); 8047 emit_operand(dst, src); 8048 } 8049 8050 void Assembler::sbbq(Register dst, Register src) { 8051 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 8052 emit_arith(0x1B, 0xC0, dst, src); 8053 } 8054 8055 void Assembler::shlq(Register dst, int imm8) { 8056 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 8057 int encode = prefixq_and_encode(dst->encoding()); 8058 if (imm8 == 1) { 8059 emit_int8((unsigned char)0xD1); 8060 emit_int8((unsigned char)(0xE0 | encode)); 8061 } else { 8062 emit_int8((unsigned char)0xC1); 8063 emit_int8((unsigned char)(0xE0 | encode)); 8064 emit_int8(imm8); 8065 } 8066 } 8067 8068 void Assembler::shlq(Register dst) { 8069 int encode = prefixq_and_encode(dst->encoding()); 8070 emit_int8((unsigned char)0xD3); 8071 emit_int8((unsigned char)(0xE0 | encode)); 8072 } 8073 8074 void Assembler::shrq(Register dst, int imm8) { 8075 assert(isShiftCount(imm8 >> 1), "illegal shift count"); 8076 int encode = prefixq_and_encode(dst->encoding()); 8077 emit_int8((unsigned char)0xC1); 8078 emit_int8((unsigned char)(0xE8 | encode)); 8079 emit_int8(imm8); 8080 } 8081 8082 void Assembler::shrq(Register dst) { 8083 int encode = prefixq_and_encode(dst->encoding()); 8084 emit_int8((unsigned char)0xD3); 8085 emit_int8(0xE8 | encode); 8086 } 8087 8088 void Assembler::subq(Address dst, int32_t imm32) { 8089 InstructionMark im(this); 8090 prefixq(dst); 8091 emit_arith_operand(0x81, rbp, dst, imm32); 8092 } 8093 8094 void Assembler::subq(Address dst, Register src) { 8095 InstructionMark im(this); 8096 prefixq(dst, src); 8097 emit_int8(0x29); 8098 emit_operand(src, dst); 8099 } 8100 8101 void Assembler::subq(Register dst, int32_t imm32) { 8102 (void) prefixq_and_encode(dst->encoding()); 8103 emit_arith(0x81, 0xE8, dst, imm32); 8104 } 8105 8106 // Force generation of a 4 byte immediate value even if it fits into 8bit 8107 void Assembler::subq_imm32(Register dst, int32_t imm32) { 8108 (void) prefixq_and_encode(dst->encoding()); 8109 emit_arith_imm32(0x81, 0xE8, dst, imm32); 8110 } 8111 8112 void Assembler::subq(Register dst, Address src) { 8113 InstructionMark im(this); 8114 prefixq(src, dst); 8115 emit_int8(0x2B); 8116 emit_operand(dst, src); 8117 } 8118 8119 void Assembler::subq(Register dst, Register src) { 8120 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 8121 emit_arith(0x2B, 0xC0, dst, src); 8122 } 8123 8124 void Assembler::testq(Register dst, int32_t imm32) { 8125 // not using emit_arith because test 8126 // doesn't support sign-extension of 8127 // 8bit operands 8128 int encode = dst->encoding(); 8129 if (encode == 0) { 8130 prefix(REX_W); 8131 emit_int8((unsigned char)0xA9); 8132 } else { 8133 encode = prefixq_and_encode(encode); 8134 emit_int8((unsigned char)0xF7); 8135 emit_int8((unsigned char)(0xC0 | encode)); 8136 } 8137 emit_int32(imm32); 8138 } 8139 8140 void Assembler::testq(Register dst, Register src) { 8141 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 8142 emit_arith(0x85, 0xC0, dst, src); 8143 } 8144 8145 void Assembler::xaddq(Address dst, Register src) { 8146 InstructionMark im(this); 8147 prefixq(dst, src); 8148 emit_int8(0x0F); 8149 emit_int8((unsigned char)0xC1); 
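// 0F C1 /r: XADD r/m64, r64; callers that need atomicity emit a LOCK prefix first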
8150 emit_operand(src, dst); 8151 } 8152 8153 void Assembler::xchgq(Register dst, Address src) { 8154 InstructionMark im(this); 8155 prefixq(src, dst); 8156 emit_int8((unsigned char)0x87); 8157 emit_operand(dst, src); 8158 } 8159 8160 void Assembler::xchgq(Register dst, Register src) { 8161 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 8162 emit_int8((unsigned char)0x87); 8163 emit_int8((unsigned char)(0xC0 | encode)); 8164 } 8165 8166 void Assembler::xorq(Register dst, Register src) { 8167 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 8168 emit_arith(0x33, 0xC0, dst, src); 8169 } 8170 8171 void Assembler::xorq(Register dst, Address src) { 8172 InstructionMark im(this); 8173 prefixq(src, dst); 8174 emit_int8(0x33); 8175 emit_operand(dst, src); 8176 } 8177 8178 #endif // !LP64