1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/assembler.inline.hpp" 28 #include "gc_interface/collectedHeap.inline.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "memory/cardTableModRefBS.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "prims/methodHandles.hpp" 33 #include "runtime/biasedLocking.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/objectMonitor.hpp" 36 #include "runtime/os.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubRoutines.hpp" 39 #include "utilities/macros.hpp" 40 #if INCLUDE_ALL_GCS 41 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 42 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 43 #include "gc_implementation/g1/heapRegion.hpp" 44 #endif // INCLUDE_ALL_GCS 45 46 #ifdef PRODUCT 47 #define BLOCK_COMMENT(str) /* nothing */ 48 #define STOP(error) stop(error) 49 #else 50 #define BLOCK_COMMENT(str) block_comment(str) 51 #define STOP(error) block_comment(error); stop(error) 52 #endif 53 54 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 55 // Implementation of AddressLiteral 56 57 // A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms. 58 unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = { 59 // -----------------Table 4.5 -------------------- // 60 16, 32, 64, // EVEX_FV(0) 61 4, 4, 4, // EVEX_FV(1) - with Evex.b 62 16, 32, 64, // EVEX_FV(2) - with Evex.w 63 8, 8, 8, // EVEX_FV(3) - with Evex.w and Evex.b 64 8, 16, 32, // EVEX_HV(0) 65 4, 4, 4, // EVEX_HV(1) - with Evex.b 66 // -----------------Table 4.6 -------------------- // 67 16, 32, 64, // EVEX_FVM(0) 68 1, 1, 1, // EVEX_T1S(0) 69 2, 2, 2, // EVEX_T1S(1) 70 4, 4, 4, // EVEX_T1S(2) 71 8, 8, 8, // EVEX_T1S(3) 72 4, 4, 4, // EVEX_T1F(0) 73 8, 8, 8, // EVEX_T1F(1) 74 8, 8, 8, // EVEX_T2(0) 75 0, 16, 16, // EVEX_T2(1) 76 0, 16, 16, // EVEX_T4(0) 77 0, 0, 32, // EVEX_T4(1) 78 0, 0, 32, // EVEX_T8(0) 79 8, 16, 32, // EVEX_HVM(0) 80 4, 8, 16, // EVEX_QVM(0) 81 2, 4, 8, // EVEX_OVM(0) 82 16, 16, 16, // EVEX_M128(0) 83 8, 32, 64, // EVEX_DUP(0) 84 0, 0, 0 // EVEX_NTUP 85 }; 86 87 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { 88 _is_lval = false; 89 _target = target; 90 switch (rtype) { 91 case relocInfo::oop_type: 92 case relocInfo::metadata_type: 93 // Oops are a special case. 
Normally they would be their own section 94 // but in cases like icBuffer they are literals in the code stream that 95 // we don't have a section for. We use none so that we get a literal address 96 // which is always patchable. 97 break; 98 case relocInfo::external_word_type: 99 _rspec = external_word_Relocation::spec(target); 100 break; 101 case relocInfo::internal_word_type: 102 _rspec = internal_word_Relocation::spec(target); 103 break; 104 case relocInfo::opt_virtual_call_type: 105 _rspec = opt_virtual_call_Relocation::spec(); 106 break; 107 case relocInfo::static_call_type: 108 _rspec = static_call_Relocation::spec(); 109 break; 110 case relocInfo::runtime_call_type: 111 _rspec = runtime_call_Relocation::spec(); 112 break; 113 case relocInfo::poll_type: 114 case relocInfo::poll_return_type: 115 _rspec = Relocation::spec_simple(rtype); 116 break; 117 case relocInfo::none: 118 break; 119 default: 120 ShouldNotReachHere(); 121 break; 122 } 123 } 124 125 // Implementation of Address 126 127 #ifdef _LP64 128 129 Address Address::make_array(ArrayAddress adr) { 130 // Not implementable on 64bit machines 131 // Should have been handled higher up the call chain. 132 ShouldNotReachHere(); 133 return Address(); 134 } 135 136 // exceedingly dangerous constructor 137 Address::Address(int disp, address loc, relocInfo::relocType rtype) { 138 _base = noreg; 139 _index = noreg; 140 _scale = no_scale; 141 _disp = disp; 142 switch (rtype) { 143 case relocInfo::external_word_type: 144 _rspec = external_word_Relocation::spec(loc); 145 break; 146 case relocInfo::internal_word_type: 147 _rspec = internal_word_Relocation::spec(loc); 148 break; 149 case relocInfo::runtime_call_type: 150 // HMM 151 _rspec = runtime_call_Relocation::spec(); 152 break; 153 case relocInfo::poll_type: 154 case relocInfo::poll_return_type: 155 _rspec = Relocation::spec_simple(rtype); 156 break; 157 case relocInfo::none: 158 break; 159 default: 160 ShouldNotReachHere(); 161 } 162 } 163 #else // LP64 164 165 Address Address::make_array(ArrayAddress adr) { 166 AddressLiteral base = adr.base(); 167 Address index = adr.index(); 168 assert(index._disp == 0, "must not have disp"); // maybe it can? 169 Address array(index._base, index._index, index._scale, (intptr_t) base.target()); 170 array._rspec = base._rspec; 171 return array; 172 } 173 174 // exceedingly dangerous constructor 175 Address::Address(address loc, RelocationHolder spec) { 176 _base = noreg; 177 _index = noreg; 178 _scale = no_scale; 179 _disp = (intptr_t) loc; 180 _rspec = spec; 181 } 182 183 #endif // _LP64 184 185 186 187 // Convert the raw encoding form into the form expected by the constructor for 188 // Address. An index of 4 (rsp) corresponds to having no index, so convert 189 // that to noreg for the Address constructor. 
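// Illustrative example (a sketch, not a call made anywhere in this file): a
// raw encoding of base = rbx, index = 4 (the rsp encoding) and scale bits of
// zero denotes a plain [rbx + disp] operand with no index register at all, so
// make_raw must hand the Address constructor noreg rather than rsp.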
190 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) { 191 RelocationHolder rspec; 192 if (disp_reloc != relocInfo::none) { 193 rspec = Relocation::spec_simple(disp_reloc); 194 } 195 bool valid_index = index != rsp->encoding(); 196 if (valid_index) { 197 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); 198 madr._rspec = rspec; 199 return madr; 200 } else { 201 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); 202 madr._rspec = rspec; 203 return madr; 204 } 205 } 206 207 // Implementation of Assembler 208 209 int AbstractAssembler::code_fill_byte() { 210 return (u_char)'\xF4'; // hlt 211 } 212 213 // make this go away someday 214 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { 215 if (rtype == relocInfo::none) 216 emit_int32(data); 217 else 218 emit_data(data, Relocation::spec_simple(rtype), format); 219 } 220 221 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { 222 assert(imm_operand == 0, "default format must be immediate in this file"); 223 assert(inst_mark() != NULL, "must be inside InstructionMark"); 224 if (rspec.type() != relocInfo::none) { 225 #ifdef ASSERT 226 check_relocation(rspec, format); 227 #endif 228 // Do not use AbstractAssembler::relocate, which is not intended for 229 // embedded words. Instead, relocate to the enclosing instruction. 230 231 // hack. call32 is too wide for mask so use disp32 232 if (format == call32_operand) 233 code_section()->relocate(inst_mark(), rspec, disp32_operand); 234 else 235 code_section()->relocate(inst_mark(), rspec, format); 236 } 237 emit_int32(data); 238 } 239 240 static int encode(Register r) { 241 int enc = r->encoding(); 242 if (enc >= 8) { 243 enc -= 8; 244 } 245 return enc; 246 } 247 248 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { 249 assert(dst->has_byte_register(), "must have byte register"); 250 assert(isByte(op1) && isByte(op2), "wrong opcode"); 251 assert(isByte(imm8), "not a byte"); 252 assert((op1 & 0x01) == 0, "should be 8bit operation"); 253 emit_int8(op1); 254 emit_int8(op2 | encode(dst)); 255 emit_int8(imm8); 256 } 257 258 259 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { 260 assert(isByte(op1) && isByte(op2), "wrong opcode"); 261 assert((op1 & 0x01) == 1, "should be 32bit operation"); 262 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 263 if (is8bit(imm32)) { 264 emit_int8(op1 | 0x02); // set sign bit 265 emit_int8(op2 | encode(dst)); 266 emit_int8(imm32 & 0xFF); 267 } else { 268 emit_int8(op1); 269 emit_int8(op2 | encode(dst)); 270 emit_int32(imm32); 271 } 272 } 273 274 // Force generation of a 4 byte immediate value even if it fits into 8bit 275 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { 276 assert(isByte(op1) && isByte(op2), "wrong opcode"); 277 assert((op1 & 0x01) == 1, "should be 32bit operation"); 278 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 279 emit_int8(op1); 280 emit_int8(op2 | encode(dst)); 281 emit_int32(imm32); 282 } 283 284 // immediate-to-memory forms 285 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { 286 assert((op1 & 0x01) == 1, "should be 32bit operation"); 287 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 288 if (is8bit(imm32)) { 289 emit_int8(op1 | 0x02); // set sign bit 290 emit_operand(rm, adr, 1); 291 
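// (The trailing int passed to emit_operand() above is the number of immediate
//  bytes that still follow the operand; the RIP-relative path uses it to
//  correct the emitted disp32. One imm8 follows here, hence the 1, while the
//  branch below passes 4 for its imm32.)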
emit_int8(imm32 & 0xFF); 292 } else { 293 emit_int8(op1); 294 emit_operand(rm, adr, 4); 295 emit_int32(imm32); 296 } 297 } 298 299 300 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { 301 assert(isByte(op1) && isByte(op2), "wrong opcode"); 302 emit_int8(op1); 303 emit_int8(op2 | encode(dst) << 3 | encode(src)); 304 } 305 306 307 bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, 308 int cur_tuple_type, int in_size_in_bits, int cur_encoding) { 309 int mod_idx = 0; 310 // We will test if the displacement fits the compressed format and if so 311 // apply the compression to the displacement iff the result is 8bit. 312 if (VM_Version::supports_evex() && is_evex_inst) { 313 switch (cur_tuple_type) { 314 case EVEX_FV: 315 if ((cur_encoding & VEX_W) == VEX_W) { 316 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 317 } else { 318 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 319 } 320 break; 321 322 case EVEX_HV: 323 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 324 break; 325 326 case EVEX_FVM: 327 break; 328 329 case EVEX_T1S: 330 switch (in_size_in_bits) { 331 case EVEX_8bit: 332 break; 333 334 case EVEX_16bit: 335 mod_idx = 1; 336 break; 337 338 case EVEX_32bit: 339 mod_idx = 2; 340 break; 341 342 case EVEX_64bit: 343 mod_idx = 3; 344 break; 345 } 346 break; 347 348 case EVEX_T1F: 349 case EVEX_T2: 350 case EVEX_T4: 351 mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0; 352 break; 353 354 case EVEX_T8: 355 break; 356 357 case EVEX_HVM: 358 break; 359 360 case EVEX_QVM: 361 break; 362 363 case EVEX_OVM: 364 break; 365 366 case EVEX_M128: 367 break; 368 369 case EVEX_DUP: 370 break; 371 372 default: 373 assert(0, "no valid evex tuple_table entry"); 374 break; 375 } 376 377 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 378 int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len]; 379 if ((disp % disp_factor) == 0) { 380 int new_disp = disp / disp_factor; 381 if ((-0x80 <= new_disp && new_disp < 0x80)) { 382 disp = new_disp; 383 } 384 } else { 385 return false; 386 } 387 } 388 } 389 return (-0x80 <= disp && disp < 0x80); 390 } 391 392 393 bool Assembler::emit_compressed_disp_byte(int &disp) { 394 int mod_idx = 0; 395 // We will test if the displacement fits the compressed format and if so 396 // apply the compression to the displacement iff the result is 8bit. 397 if (VM_Version::supports_evex() && is_evex_instruction) { 398 switch (tuple_type) { 399 case EVEX_FV: 400 if ((evex_encoding & VEX_W) == VEX_W) { 401 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2; 402 } else { 403 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 404 } 405 break; 406 407 case EVEX_HV: 408 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 409 break; 410 411 case EVEX_FVM: 412 break; 413 414 case EVEX_T1S: 415 switch (input_size_in_bits) { 416 case EVEX_8bit: 417 break; 418 419 case EVEX_16bit: 420 mod_idx = 1; 421 break; 422 423 case EVEX_32bit: 424 mod_idx = 2; 425 break; 426 427 case EVEX_64bit: 428 mod_idx = 3; 429 break; 430 } 431 break; 432 433 case EVEX_T1F: 434 case EVEX_T2: 435 case EVEX_T4: 436 mod_idx = (input_size_in_bits == EVEX_64bit) ?
1 : 0; 437 break; 438 439 case EVEX_T8: 440 break; 441 442 case EVEX_HVM: 443 break; 444 445 case EVEX_QVM: 446 break; 447 448 case EVEX_OVM: 449 break; 450 451 case EVEX_M128: 452 break; 453 454 case EVEX_DUP: 455 break; 456 457 default: 458 assert(0, "no valid evex tuple_table entry"); 459 break; 460 } 461 462 if (avx_vector_len >= AVX_128bit && avx_vector_len <= AVX_512bit) { 463 int disp_factor = tuple_table[tuple_type + mod_idx][avx_vector_len]; 464 if ((disp % disp_factor) == 0) { 465 int new_disp = disp / disp_factor; 466 if (is8bit(new_disp)) { 467 disp = new_disp; 468 } 469 } else { 470 return false; 471 } 472 } 473 } 474 return is8bit(disp); 475 } 476 477 478 void Assembler::emit_operand(Register reg, Register base, Register index, 479 Address::ScaleFactor scale, int disp, 480 RelocationHolder const& rspec, 481 int rip_relative_correction) { 482 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); 483 484 // Encode the registers as needed in the fields they are used in 485 486 int regenc = encode(reg) << 3; 487 int indexenc = index->is_valid() ? encode(index) << 3 : 0; 488 int baseenc = base->is_valid() ? encode(base) : 0; 489 490 if (base->is_valid()) { 491 if (index->is_valid()) { 492 assert(scale != Address::no_scale, "inconsistent address"); 493 // [base + index*scale + disp] 494 if (disp == 0 && rtype == relocInfo::none && 495 base != rbp LP64_ONLY(&& base != r13)) { 496 // [base + index*scale] 497 // [00 reg 100][ss index base] 498 assert(index != rsp, "illegal addressing mode"); 499 emit_int8(0x04 | regenc); 500 emit_int8(scale << 6 | indexenc | baseenc); 501 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 502 // [base + index*scale + imm8] 503 // [01 reg 100][ss index base] imm8 504 assert(index != rsp, "illegal addressing mode"); 505 emit_int8(0x44 | regenc); 506 emit_int8(scale << 6 | indexenc | baseenc); 507 emit_int8(disp & 0xFF); 508 } else { 509 // [base + index*scale + disp32] 510 // [10 reg 100][ss index base] disp32 511 assert(index != rsp, "illegal addressing mode"); 512 emit_int8(0x84 | regenc); 513 emit_int8(scale << 6 | indexenc | baseenc); 514 emit_data(disp, rspec, disp32_operand); 515 } 516 } else if (base == rsp LP64_ONLY(|| base == r12)) { 517 // [rsp + disp] 518 if (disp == 0 && rtype == relocInfo::none) { 519 // [rsp] 520 // [00 reg 100][00 100 100] 521 emit_int8(0x04 | regenc); 522 emit_int8(0x24); 523 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 524 // [rsp + imm8] 525 // [01 reg 100][00 100 100] disp8 526 emit_int8(0x44 | regenc); 527 emit_int8(0x24); 528 emit_int8(disp & 0xFF); 529 } else { 530 // [rsp + imm32] 531 // [10 reg 100][00 100 100] disp32 532 emit_int8(0x84 | regenc); 533 emit_int8(0x24); 534 emit_data(disp, rspec, disp32_operand); 535 } 536 } else { 537 // [base + disp] 538 assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); 539 if (disp == 0 && rtype == relocInfo::none && 540 base != rbp LP64_ONLY(&& base != r13)) { 541 // [base] 542 // [00 reg base] 543 emit_int8(0x00 | regenc | baseenc); 544 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 545 // [base + disp8] 546 // [01 reg base] disp8 547 emit_int8(0x40 | regenc | baseenc); 548 emit_int8(disp & 0xFF); 549 } else { 550 // [base + disp32] 551 // [10 reg base] disp32 552 emit_int8(0x80 | regenc | baseenc); 553 emit_data(disp, rspec, disp32_operand); 554 } 555 } 556 } else { 557 if (index->is_valid()) { 558 assert(scale != Address::no_scale, "inconsistent address"); 559 
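// (Encoding note for the no-base forms that follow: mod = 00 combined with a
//  SIB base field of 0b101 means "no base register, disp32 follows", so this
//  path always emits a full 32-bit displacement.)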
// [index*scale + disp] 560 // [00 reg 100][ss index 101] disp32 561 assert(index != rsp, "illegal addressing mode"); 562 emit_int8(0x04 | regenc); 563 emit_int8(scale << 6 | indexenc | 0x05); 564 emit_data(disp, rspec, disp32_operand); 565 } else if (rtype != relocInfo::none ) { 566 // [disp] (64bit) RIP-RELATIVE (32bit) abs 567 // [00 000 101] disp32 568 569 emit_int8(0x05 | regenc); 570 // Note that the RIP-rel. correction applies to the generated 571 // disp field, but _not_ to the target address in the rspec. 572 573 // disp was created by converting the target address minus the pc 574 // at the start of the instruction. That needs more correction here. 575 // intptr_t disp = target - next_ip; 576 assert(inst_mark() != NULL, "must be inside InstructionMark"); 577 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; 578 int64_t adjusted = disp; 579 // Do rip-rel adjustment for 64bit 580 LP64_ONLY(adjusted -= (next_ip - inst_mark())); 581 assert(is_simm32(adjusted), 582 "must be 32bit offset (RIP relative address)"); 583 emit_data((int32_t) adjusted, rspec, disp32_operand); 584 585 } else { 586 // 32bit never did this, did everything as the rip-rel/disp code above 587 // [disp] ABSOLUTE 588 // [00 reg 100][00 100 101] disp32 589 emit_int8(0x04 | regenc); 590 emit_int8(0x25); 591 emit_data(disp, rspec, disp32_operand); 592 } 593 } 594 is_evex_instruction = false; 595 } 596 597 void Assembler::emit_operand(XMMRegister reg, Register base, Register index, 598 Address::ScaleFactor scale, int disp, 599 RelocationHolder const& rspec) { 600 if (UseAVX > 2) { 601 int xreg_enc = reg->encoding(); 602 if (xreg_enc > 15) { 603 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf); 604 emit_operand((Register)new_reg, base, index, scale, disp, rspec); 605 return; 606 } 607 } 608 emit_operand((Register)reg, base, index, scale, disp, rspec); 609 } 610 611 // Secret local extension to Assembler::WhichOperand: 612 #define end_pc_operand (_WhichOperand_limit) 613 614 address Assembler::locate_operand(address inst, WhichOperand which) { 615 // Decode the given instruction, and return the address of 616 // an embedded 32-bit operand word. 617 618 // If "which" is disp32_operand, selects the displacement portion 619 // of an effective address specifier. 620 // If "which" is imm64_operand, selects the trailing immediate constant. 621 // If "which" is call32_operand, selects the displacement of a call or jump. 622 // Caller is responsible for ensuring that there is such an operand, 623 // and that it is 32/64 bits wide. 624 625 // If "which" is end_pc_operand, find the end of the instruction. 626 627 address ip = inst; 628 bool is_64bit = false; 629 630 debug_only(bool has_disp32 = false); 631 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn 632 633 again_after_prefix: 634 switch (0xFF & *ip++) { 635 636 // These convenience macros generate groups of "case" labels for the switch. 
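// For example, "case REP4(0x00):" expands to
// "case 0x00: case 0x01: case 0x02: case 0x03:", so one label group covers a
// run of four related opcodes (REP8 and REP16 cover eight and sixteen).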
637 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 638 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ 639 case (x)+4: case (x)+5: case (x)+6: case (x)+7 640 #define REP16(x) REP8((x)+0): \ 641 case REP8((x)+8) 642 643 case CS_segment: 644 case SS_segment: 645 case DS_segment: 646 case ES_segment: 647 case FS_segment: 648 case GS_segment: 649 // Seems dubious 650 LP64_ONLY(assert(false, "shouldn't have that prefix")); 651 assert(ip == inst+1, "only one prefix allowed"); 652 goto again_after_prefix; 653 654 case 0x67: 655 case REX: 656 case REX_B: 657 case REX_X: 658 case REX_XB: 659 case REX_R: 660 case REX_RB: 661 case REX_RX: 662 case REX_RXB: 663 NOT_LP64(assert(false, "64bit prefixes")); 664 goto again_after_prefix; 665 666 case REX_W: 667 case REX_WB: 668 case REX_WX: 669 case REX_WXB: 670 case REX_WR: 671 case REX_WRB: 672 case REX_WRX: 673 case REX_WRXB: 674 NOT_LP64(assert(false, "64bit prefixes")); 675 is_64bit = true; 676 goto again_after_prefix; 677 678 case 0xFF: // pushq a; decl a; incl a; call a; jmp a 679 case 0x88: // movb a, r 680 case 0x89: // movl a, r 681 case 0x8A: // movb r, a 682 case 0x8B: // movl r, a 683 case 0x8F: // popl a 684 debug_only(has_disp32 = true); 685 break; 686 687 case 0x68: // pushq #32 688 if (which == end_pc_operand) { 689 return ip + 4; 690 } 691 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate"); 692 return ip; // not produced by emit_operand 693 694 case 0x66: // movw ... (size prefix) 695 again_after_size_prefix2: 696 switch (0xFF & *ip++) { 697 case REX: 698 case REX_B: 699 case REX_X: 700 case REX_XB: 701 case REX_R: 702 case REX_RB: 703 case REX_RX: 704 case REX_RXB: 705 case REX_W: 706 case REX_WB: 707 case REX_WX: 708 case REX_WXB: 709 case REX_WR: 710 case REX_WRB: 711 case REX_WRX: 712 case REX_WRXB: 713 NOT_LP64(assert(false, "64bit prefix found")); 714 goto again_after_size_prefix2; 715 case 0x8B: // movw r, a 716 case 0x89: // movw a, r 717 debug_only(has_disp32 = true); 718 break; 719 case 0xC7: // movw a, #16 720 debug_only(has_disp32 = true); 721 tail_size = 2; // the imm16 722 break; 723 case 0x0F: // several SSE/SSE2 variants 724 ip--; // reparse the 0x0F 725 goto again_after_prefix; 726 default: 727 ShouldNotReachHere(); 728 } 729 break; 730 731 case REP8(0xB8): // movl/q r, #32/#64(oop?) 732 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); 733 // these asserts are somewhat nonsensical 734 #ifndef _LP64 735 assert(which == imm_operand || which == disp32_operand, 736 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip))); 737 #else 738 assert((which == call32_operand || which == imm_operand) && is_64bit || 739 which == narrow_oop_operand && !is_64bit, 740 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip))); 741 #endif // _LP64 742 return ip; 743 744 case 0x69: // imul r, a, #32 745 case 0xC7: // movl a, #32(oop?) 746 tail_size = 4; 747 debug_only(has_disp32 = true); // has both kinds of operands! 748 break; 749 750 case 0x0F: // movx..., etc. 751 switch (0xFF & *ip++) { 752 case 0x3A: // pcmpestri 753 tail_size = 1; 754 case 0x38: // ptest, pmovzxbw 755 ip++; // skip opcode 756 debug_only(has_disp32 = true); // has both kinds of operands! 757 break; 758 759 case 0x70: // pshufd r, r/a, #8 760 debug_only(has_disp32 = true); // has both kinds of operands! 
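// (no break: pshufd intentionally falls through to the 0x73 case below so
//  that tail_size also accounts for its trailing imm8)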
761 case 0x73: // psrldq r, #8 762 tail_size = 1; 763 break; 764 765 case 0x12: // movlps 766 case 0x28: // movaps 767 case 0x2E: // ucomiss 768 case 0x2F: // comiss 769 case 0x54: // andps 770 case 0x55: // andnps 771 case 0x56: // orps 772 case 0x57: // xorps 773 case 0x6E: // movd 774 case 0x7E: // movd 775 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush 776 debug_only(has_disp32 = true); 777 break; 778 779 case 0xAD: // shrd r, a, %cl 780 case 0xAF: // imul r, a 781 case 0xBE: // movsbl r, a (movsxb) 782 case 0xBF: // movswl r, a (movsxw) 783 case 0xB6: // movzbl r, a (movzxb) 784 case 0xB7: // movzwl r, a (movzxw) 785 case REP16(0x40): // cmovl cc, r, a 786 case 0xB0: // cmpxchgb 787 case 0xB1: // cmpxchg 788 case 0xC1: // xaddl 789 case 0xC7: // cmpxchg8 790 case REP16(0x90): // setcc a 791 debug_only(has_disp32 = true); 792 // fall out of the switch to decode the address 793 break; 794 795 case 0xC4: // pinsrw r, a, #8 796 debug_only(has_disp32 = true); 797 case 0xC5: // pextrw r, r, #8 798 tail_size = 1; // the imm8 799 break; 800 801 case 0xAC: // shrd r, a, #8 802 debug_only(has_disp32 = true); 803 tail_size = 1; // the imm8 804 break; 805 806 case REP16(0x80): // jcc rdisp32 807 if (which == end_pc_operand) return ip + 4; 808 assert(which == call32_operand, "jcc has no disp32 or imm"); 809 return ip; 810 default: 811 ShouldNotReachHere(); 812 } 813 break; 814 815 case 0x81: // addl a, #32; addl r, #32 816 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 817 // on 32bit in the case of cmpl, the imm might be an oop 818 tail_size = 4; 819 debug_only(has_disp32 = true); // has both kinds of operands! 820 break; 821 822 case 0x83: // addl a, #8; addl r, #8 823 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 824 debug_only(has_disp32 = true); // has both kinds of operands! 825 tail_size = 1; 826 break; 827 828 case 0x9B: 829 switch (0xFF & *ip++) { 830 case 0xD9: // fnstcw a 831 debug_only(has_disp32 = true); 832 break; 833 default: 834 ShouldNotReachHere(); 835 } 836 break; 837 838 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a 839 case REP4(0x10): // adc... 840 case REP4(0x20): // and... 841 case REP4(0x30): // xor... 842 case REP4(0x08): // or... 843 case REP4(0x18): // sbb... 844 case REP4(0x28): // sub... 845 case 0xF7: // mull a 846 case 0x8D: // lea r, a 847 case 0x87: // xchg r, a 848 case REP4(0x38): // cmp... 849 case 0x85: // test r, a 850 debug_only(has_disp32 = true); // has both kinds of operands! 851 break; 852 853 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 854 case 0xC6: // movb a, #8 855 case 0x80: // cmpb a, #8 856 case 0x6B: // imul r, a, #8 857 debug_only(has_disp32 = true); // has both kinds of operands! 858 tail_size = 1; // the imm8 859 break; 860 861 case 0xC4: // VEX_3bytes 862 case 0xC5: // VEX_2bytes 863 assert((UseAVX > 0), "shouldn't have VEX prefix"); 864 assert(ip == inst+1, "no prefixes allowed"); 865 // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions 866 // but they have prefix 0x0F and processed when 0x0F processed above. 867 // 868 // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES 869 // instructions (these instructions are not supported in 64-bit mode). 870 // To distinguish them bits [7:6] are set in the VEX second byte since 871 // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set 872 // those VEX bits REX and vvvv bits are inverted. 
873 // 874 // Fortunately C2 doesn't generate these instructions so we don't need 875 // to check for them in product version. 876 877 // Check second byte 878 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions")); 879 880 // First byte 881 if ((0xFF & *inst) == VEX_3bytes) { 882 ip++; // third byte 883 is_64bit = ((VEX_W & *ip) == VEX_W); 884 } 885 ip++; // opcode 886 // To find the end of instruction (which == end_pc_operand). 887 switch (0xFF & *ip) { 888 case 0x61: // pcmpestri r, r/a, #8 889 case 0x70: // pshufd r, r/a, #8 890 case 0x73: // psrldq r, #8 891 tail_size = 1; // the imm8 892 break; 893 default: 894 break; 895 } 896 ip++; // skip opcode 897 debug_only(has_disp32 = true); // has both kinds of operands! 898 break; 899 900 case 0x62: // EVEX_4bytes 901 assert((UseAVX > 0), "shouldn't have EVEX prefix"); 902 assert(ip == inst+1, "no prefixes allowed"); 903 // no EVEX collisions, all instructions that have 0x62 opcodes 904 // have EVEX versions and are subopcodes of 0x66 905 ip++; // skip P0 and examine W in P1 906 is_64bit = ((VEX_W & *ip) == VEX_W); 907 ip++; // move to P2 908 ip++; // skip P2, move to opcode 909 // To find the end of instruction (which == end_pc_operand). 910 switch (0xFF & *ip) { 911 case 0x61: // pcmpestri r, r/a, #8 912 case 0x70: // pshufd r, r/a, #8 913 case 0x73: // psrldq r, #8 914 tail_size = 1; // the imm8 915 break; 916 default: 917 break; 918 } 919 ip++; // skip opcode 920 debug_only(has_disp32 = true); // has both kinds of operands! 921 break; 922 923 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 924 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl 925 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a 926 case 0xDD: // fld_d a; fst_d a; fstp_d a 927 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a 928 case 0xDF: // fild_d a; fistp_d a 929 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a 930 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a 931 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a 932 debug_only(has_disp32 = true); 933 break; 934 935 case 0xE8: // call rdisp32 936 case 0xE9: // jmp rdisp32 937 if (which == end_pc_operand) return ip + 4; 938 assert(which == call32_operand, "call has no disp32 or imm"); 939 return ip; 940 941 case 0xF0: // Lock 942 assert(os::is_MP(), "only on MP"); 943 goto again_after_prefix; 944 945 case 0xF3: // For SSE 946 case 0xF2: // For SSE2 947 switch (0xFF & *ip++) { 948 case REX: 949 case REX_B: 950 case REX_X: 951 case REX_XB: 952 case REX_R: 953 case REX_RB: 954 case REX_RX: 955 case REX_RXB: 956 case REX_W: 957 case REX_WB: 958 case REX_WX: 959 case REX_WXB: 960 case REX_WR: 961 case REX_WRB: 962 case REX_WRX: 963 case REX_WRXB: 964 NOT_LP64(assert(false, "found 64bit prefix")); 965 ip++; 966 default: 967 ip++; 968 } 969 debug_only(has_disp32 = true); // has both kinds of operands!
970 break; 971 972 default: 973 ShouldNotReachHere(); 974 975 #undef REP8 976 #undef REP16 977 } 978 979 assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); 980 #ifdef _LP64 981 assert(which != imm_operand, "instruction is not a movq reg, imm64"); 982 #else 983 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field"); 984 assert(which != imm_operand || has_disp32, "instruction has no imm32 field"); 985 #endif // LP64 986 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); 987 988 // parse the output of emit_operand 989 int op2 = 0xFF & *ip++; 990 int base = op2 & 0x07; 991 int op3 = -1; 992 const int b100 = 4; 993 const int b101 = 5; 994 if (base == b100 && (op2 >> 6) != 3) { 995 op3 = 0xFF & *ip++; 996 base = op3 & 0x07; // refetch the base 997 } 998 // now ip points at the disp (if any) 999 1000 switch (op2 >> 6) { 1001 case 0: 1002 // [00 reg 100][ss index base] 1003 // [00 reg 100][00 100 esp] 1004 // [00 reg base] 1005 // [00 reg 100][ss index 101][disp32] 1006 // [00 reg 101] [disp32] 1007 1008 if (base == b101) { 1009 if (which == disp32_operand) 1010 return ip; // caller wants the disp32 1011 ip += 4; // skip the disp32 1012 } 1013 break; 1014 1015 case 1: 1016 // [01 reg 100][ss index base][disp8] 1017 // [01 reg 100][00 100 esp][disp8] 1018 // [01 reg base] [disp8] 1019 ip += 1; // skip the disp8 1020 break; 1021 1022 case 2: 1023 // [10 reg 100][ss index base][disp32] 1024 // [10 reg 100][00 100 esp][disp32] 1025 // [10 reg base] [disp32] 1026 if (which == disp32_operand) 1027 return ip; // caller wants the disp32 1028 ip += 4; // skip the disp32 1029 break; 1030 1031 case 3: 1032 // [11 reg base] (not a memory addressing mode) 1033 break; 1034 } 1035 1036 if (which == end_pc_operand) { 1037 return ip + tail_size; 1038 } 1039 1040 #ifdef _LP64 1041 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32"); 1042 #else 1043 assert(which == imm_operand, "instruction has only an imm field"); 1044 #endif // LP64 1045 return ip; 1046 } 1047 1048 address Assembler::locate_next_instruction(address inst) { 1049 // Secretly share code with locate_operand: 1050 return locate_operand(inst, end_pc_operand); 1051 } 1052 1053 1054 #ifdef ASSERT 1055 void Assembler::check_relocation(RelocationHolder const& rspec, int format) { 1056 address inst = inst_mark(); 1057 assert(inst != NULL && inst < pc(), "must point to beginning of instruction"); 1058 address opnd; 1059 1060 Relocation* r = rspec.reloc(); 1061 if (r->type() == relocInfo::none) { 1062 return; 1063 } else if (r->is_call() || format == call32_operand) { 1064 // assert(format == imm32_operand, "cannot specify a nonzero format"); 1065 opnd = locate_operand(inst, call32_operand); 1066 } else if (r->is_data()) { 1067 assert(format == imm_operand || format == disp32_operand 1068 LP64_ONLY(|| format == narrow_oop_operand), "format ok"); 1069 opnd = locate_operand(inst, (WhichOperand)format); 1070 } else { 1071 assert(format == imm_operand, "cannot specify a format"); 1072 return; 1073 } 1074 assert(opnd == pc(), "must put operand where relocs can find it"); 1075 } 1076 #endif // ASSERT 1077 1078 void Assembler::emit_operand32(Register reg, Address adr) { 1079 assert(reg->encoding() < 8, "no extended registers"); 1080 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1081 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1082 adr._rspec); 1083 } 1084 1085 void Assembler::emit_operand(Register 
reg, Address adr, 1086 int rip_relative_correction) { 1087 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1088 adr._rspec, 1089 rip_relative_correction); 1090 } 1091 1092 void Assembler::emit_operand(XMMRegister reg, Address adr) { 1093 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1094 adr._rspec); 1095 } 1096 1097 // MMX operations 1098 void Assembler::emit_operand(MMXRegister reg, Address adr) { 1099 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1100 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); 1101 } 1102 1103 // work around gcc (3.2.1-7a) bug 1104 void Assembler::emit_operand(Address adr, MMXRegister reg) { 1105 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1106 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); 1107 } 1108 1109 1110 void Assembler::emit_farith(int b1, int b2, int i) { 1111 assert(isByte(b1) && isByte(b2), "wrong opcode"); 1112 assert(0 <= i && i < 8, "illegal stack offset"); 1113 emit_int8(b1); 1114 emit_int8(b2 + i); 1115 } 1116 1117 1118 // Now the Assembler instructions (identical for 32/64 bits) 1119 1120 void Assembler::adcl(Address dst, int32_t imm32) { 1121 InstructionMark im(this); 1122 prefix(dst); 1123 emit_arith_operand(0x81, rdx, dst, imm32); 1124 } 1125 1126 void Assembler::adcl(Address dst, Register src) { 1127 InstructionMark im(this); 1128 prefix(dst, src); 1129 emit_int8(0x11); 1130 emit_operand(src, dst); 1131 } 1132 1133 void Assembler::adcl(Register dst, int32_t imm32) { 1134 prefix(dst); 1135 emit_arith(0x81, 0xD0, dst, imm32); 1136 } 1137 1138 void Assembler::adcl(Register dst, Address src) { 1139 InstructionMark im(this); 1140 prefix(src, dst); 1141 emit_int8(0x13); 1142 emit_operand(dst, src); 1143 } 1144 1145 void Assembler::adcl(Register dst, Register src) { 1146 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1147 emit_arith(0x13, 0xC0, dst, src); 1148 } 1149 1150 void Assembler::addl(Address dst, int32_t imm32) { 1151 InstructionMark im(this); 1152 prefix(dst); 1153 emit_arith_operand(0x81, rax, dst, imm32); 1154 } 1155 1156 void Assembler::addl(Address dst, Register src) { 1157 InstructionMark im(this); 1158 prefix(dst, src); 1159 emit_int8(0x01); 1160 emit_operand(src, dst); 1161 } 1162 1163 void Assembler::addl(Register dst, int32_t imm32) { 1164 prefix(dst); 1165 emit_arith(0x81, 0xC0, dst, imm32); 1166 } 1167 1168 void Assembler::addl(Register dst, Address src) { 1169 InstructionMark im(this); 1170 prefix(src, dst); 1171 emit_int8(0x03); 1172 emit_operand(dst, src); 1173 } 1174 1175 void Assembler::addl(Register dst, Register src) { 1176 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1177 emit_arith(0x03, 0xC0, dst, src); 1178 } 1179 1180 void Assembler::addr_nop_4() { 1181 assert(UseAddressNop, "no CPU support"); 1182 // 4 bytes: NOP DWORD PTR [EAX+0] 1183 emit_int8(0x0F); 1184 emit_int8(0x1F); 1185 emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); 1186 emit_int8(0); // 8-bits offset (1 byte) 1187 } 1188 1189 void Assembler::addr_nop_5() { 1190 assert(UseAddressNop, "no CPU support"); 1191 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset 1192 emit_int8(0x0F); 1193 emit_int8(0x1F); 1194 emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); 1195 emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1196 emit_int8(0); // 8-bits offset (1 byte) 1197 } 1198 1199 void Assembler::addr_nop_7() { 1200 
assert(UseAddressNop, "no CPU support"); 1201 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset 1202 emit_int8(0x0F); 1203 emit_int8(0x1F); 1204 emit_int8((unsigned char)0x80); 1205 // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); 1206 emit_int32(0); // 32-bits offset (4 bytes) 1207 } 1208 1209 void Assembler::addr_nop_8() { 1210 assert(UseAddressNop, "no CPU support"); 1211 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset 1212 emit_int8(0x0F); 1213 emit_int8(0x1F); 1214 emit_int8((unsigned char)0x84); 1215 // emit_rm(cbuf, 0x2, EAX_enc, 0x4); 1216 emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1217 emit_int32(0); // 32-bits offset (4 bytes) 1218 } 1219 1220 void Assembler::addsd(XMMRegister dst, XMMRegister src) { 1221 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1222 if (VM_Version::supports_evex()) { 1223 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2); 1224 } else { 1225 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2); 1226 } 1227 } 1228 1229 void Assembler::addsd(XMMRegister dst, Address src) { 1230 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1231 if (VM_Version::supports_evex()) { 1232 tuple_type = EVEX_T1S; 1233 input_size_in_bits = EVEX_64bit; 1234 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2); 1235 } else { 1236 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2); 1237 } 1238 } 1239 1240 void Assembler::addss(XMMRegister dst, XMMRegister src) { 1241 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1242 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3); 1243 } 1244 1245 void Assembler::addss(XMMRegister dst, Address src) { 1246 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1247 if (VM_Version::supports_evex()) { 1248 tuple_type = EVEX_T1S; 1249 input_size_in_bits = EVEX_32bit; 1250 } 1251 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3); 1252 } 1253 1254 void Assembler::aesdec(XMMRegister dst, Address src) { 1255 assert(VM_Version::supports_aes(), ""); 1256 InstructionMark im(this); 1257 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1258 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1259 emit_int8((unsigned char)0xDE); 1260 emit_operand(dst, src); 1261 } 1262 1263 void Assembler::aesdec(XMMRegister dst, XMMRegister src) { 1264 assert(VM_Version::supports_aes(), ""); 1265 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1266 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1267 emit_int8((unsigned char)0xDE); 1268 emit_int8(0xC0 | encode); 1269 } 1270 1271 void Assembler::aesdeclast(XMMRegister dst, Address src) { 1272 assert(VM_Version::supports_aes(), ""); 1273 InstructionMark im(this); 1274 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1275 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1276 emit_int8((unsigned char)0xDF); 1277 emit_operand(dst, src); 1278 } 1279 1280 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { 1281 assert(VM_Version::supports_aes(), ""); 1282 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1283 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1284 emit_int8((unsigned char)0xDF); 1285 emit_int8((unsigned char)(0xC0 | encode)); 1286 } 1287 1288 void Assembler::aesenc(XMMRegister dst, Address src) { 1289 assert(VM_Version::supports_aes(), ""); 1290 InstructionMark im(this); 1291 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1292 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1293 emit_int8((unsigned char)0xDC); 1294 emit_operand(dst, src); 1295 } 1296 1297 void Assembler::aesenc(XMMRegister dst, XMMRegister src) { 1298 assert(VM_Version::supports_aes(), ""); 1299 int encode = simd_prefix_and_encode(dst, dst, 
src, VEX_SIMD_66, false, 1300 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1301 emit_int8((unsigned char)0xDC); 1302 emit_int8(0xC0 | encode); 1303 } 1304 1305 void Assembler::aesenclast(XMMRegister dst, Address src) { 1306 assert(VM_Version::supports_aes(), ""); 1307 InstructionMark im(this); 1308 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1309 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1310 emit_int8((unsigned char)0xDD); 1311 emit_operand(dst, src); 1312 } 1313 1314 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { 1315 assert(VM_Version::supports_aes(), ""); 1316 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1317 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1318 emit_int8((unsigned char)0xDD); 1319 emit_int8((unsigned char)(0xC0 | encode)); 1320 } 1321 1322 1323 void Assembler::andl(Address dst, int32_t imm32) { 1324 InstructionMark im(this); 1325 prefix(dst); 1326 emit_int8((unsigned char)0x81); 1327 emit_operand(rsp, dst, 4); 1328 emit_int32(imm32); 1329 } 1330 1331 void Assembler::andl(Register dst, int32_t imm32) { 1332 prefix(dst); 1333 emit_arith(0x81, 0xE0, dst, imm32); 1334 } 1335 1336 void Assembler::andl(Register dst, Address src) { 1337 InstructionMark im(this); 1338 prefix(src, dst); 1339 emit_int8(0x23); 1340 emit_operand(dst, src); 1341 } 1342 1343 void Assembler::andl(Register dst, Register src) { 1344 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1345 emit_arith(0x23, 0xC0, dst, src); 1346 } 1347 1348 void Assembler::andnl(Register dst, Register src1, Register src2) { 1349 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1350 int encode = vex_prefix_0F38_and_encode_legacy(dst, src1, src2, false); 1351 emit_int8((unsigned char)0xF2); 1352 emit_int8((unsigned char)(0xC0 | encode)); 1353 } 1354 1355 void Assembler::andnl(Register dst, Register src1, Address src2) { 1356 InstructionMark im(this); 1357 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1358 vex_prefix_0F38_legacy(dst, src1, src2, false); 1359 emit_int8((unsigned char)0xF2); 1360 emit_operand(dst, src2); 1361 } 1362 1363 void Assembler::bsfl(Register dst, Register src) { 1364 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1365 emit_int8(0x0F); 1366 emit_int8((unsigned char)0xBC); 1367 emit_int8((unsigned char)(0xC0 | encode)); 1368 } 1369 1370 void Assembler::bsrl(Register dst, Register src) { 1371 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1372 emit_int8(0x0F); 1373 emit_int8((unsigned char)0xBD); 1374 emit_int8((unsigned char)(0xC0 | encode)); 1375 } 1376 1377 void Assembler::bswapl(Register reg) { // bswap 1378 int encode = prefix_and_encode(reg->encoding()); 1379 emit_int8(0x0F); 1380 emit_int8((unsigned char)(0xC8 | encode)); 1381 } 1382 1383 void Assembler::blsil(Register dst, Register src) { 1384 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1385 int encode = vex_prefix_0F38_and_encode_legacy(rbx, dst, src, false); 1386 emit_int8((unsigned char)0xF3); 1387 emit_int8((unsigned char)(0xC0 | encode)); 1388 } 1389 1390 void Assembler::blsil(Register dst, Address src) { 1391 InstructionMark im(this); 1392 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1393 vex_prefix_0F38_legacy(rbx, dst, src, false); 1394 emit_int8((unsigned char)0xF3); 1395 emit_operand(rbx, src); 1396 } 1397 1398 void Assembler::blsmskl(Register dst, Register src) { 1399 
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1400 int encode = vex_prefix_0F38_and_encode_legacy(rdx, dst, src, false); 1401 emit_int8((unsigned char)0xF3); 1402 emit_int8((unsigned char)(0xC0 | encode)); 1403 } 1404 1405 void Assembler::blsmskl(Register dst, Address src) { 1406 InstructionMark im(this); 1407 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1408 vex_prefix_0F38(rdx, dst, src, false); 1409 emit_int8((unsigned char)0xF3); 1410 emit_operand(rdx, src); 1411 } 1412 1413 void Assembler::blsrl(Register dst, Register src) { 1414 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1415 int encode = vex_prefix_0F38_and_encode_legacy(rcx, dst, src, false); 1416 emit_int8((unsigned char)0xF3); 1417 emit_int8((unsigned char)(0xC0 | encode)); 1418 } 1419 1420 void Assembler::blsrl(Register dst, Address src) { 1421 InstructionMark im(this); 1422 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1423 vex_prefix_0F38_legacy(rcx, dst, src, false); 1424 emit_int8((unsigned char)0xF3); 1425 emit_operand(rcx, src); 1426 } 1427 1428 void Assembler::call(Label& L, relocInfo::relocType rtype) { 1429 // suspect disp32 is always good 1430 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand); 1431 1432 if (L.is_bound()) { 1433 const int long_size = 5; 1434 int offs = (int)( target(L) - pc() ); 1435 assert(offs <= 0, "assembler error"); 1436 InstructionMark im(this); 1437 // 1110 1000 #32-bit disp 1438 emit_int8((unsigned char)0xE8); 1439 emit_data(offs - long_size, rtype, operand); 1440 } else { 1441 InstructionMark im(this); 1442 // 1110 1000 #32-bit disp 1443 L.add_patch_at(code(), locator()); 1444 1445 emit_int8((unsigned char)0xE8); 1446 emit_data(int(0), rtype, operand); 1447 } 1448 } 1449 1450 void Assembler::call(Register dst) { 1451 int encode = prefix_and_encode(dst->encoding()); 1452 emit_int8((unsigned char)0xFF); 1453 emit_int8((unsigned char)(0xD0 | encode)); 1454 } 1455 1456 1457 void Assembler::call(Address adr) { 1458 InstructionMark im(this); 1459 prefix(adr); 1460 emit_int8((unsigned char)0xFF); 1461 emit_operand(rdx, adr); 1462 } 1463 1464 void Assembler::call_literal(address entry, RelocationHolder const& rspec) { 1465 assert(entry != NULL, "call most probably wrong"); 1466 InstructionMark im(this); 1467 emit_int8((unsigned char)0xE8); 1468 intptr_t disp = entry - (pc() + sizeof(int32_t)); 1469 assert(is_simm32(disp), "must be 32bit offset (call2)"); 1470 // Technically, should use call32_operand, but this format is 1471 // implied by the fact that we're emitting a call instruction. 
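// (disp above is measured from the end of the 5-byte call: pc() already sits
//  past the 0xE8 opcode byte, so pc() + sizeof(int32_t) is the address of the
//  next instruction, which is what the CPU adds the signed disp32 to.)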
1472 1473 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand); 1474 emit_data((int) disp, rspec, operand); 1475 } 1476 1477 void Assembler::cdql() { 1478 emit_int8((unsigned char)0x99); 1479 } 1480 1481 void Assembler::cld() { 1482 emit_int8((unsigned char)0xFC); 1483 } 1484 1485 void Assembler::cmovl(Condition cc, Register dst, Register src) { 1486 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1487 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1488 emit_int8(0x0F); 1489 emit_int8(0x40 | cc); 1490 emit_int8((unsigned char)(0xC0 | encode)); 1491 } 1492 1493 1494 void Assembler::cmovl(Condition cc, Register dst, Address src) { 1495 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1496 prefix(src, dst); 1497 emit_int8(0x0F); 1498 emit_int8(0x40 | cc); 1499 emit_operand(dst, src); 1500 } 1501 1502 void Assembler::cmpb(Address dst, int imm8) { 1503 InstructionMark im(this); 1504 prefix(dst); 1505 emit_int8((unsigned char)0x80); 1506 emit_operand(rdi, dst, 1); 1507 emit_int8(imm8); 1508 } 1509 1510 void Assembler::cmpl(Address dst, int32_t imm32) { 1511 InstructionMark im(this); 1512 prefix(dst); 1513 emit_int8((unsigned char)0x81); 1514 emit_operand(rdi, dst, 4); 1515 emit_int32(imm32); 1516 } 1517 1518 void Assembler::cmpl(Register dst, int32_t imm32) { 1519 prefix(dst); 1520 emit_arith(0x81, 0xF8, dst, imm32); 1521 } 1522 1523 void Assembler::cmpl(Register dst, Register src) { 1524 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1525 emit_arith(0x3B, 0xC0, dst, src); 1526 } 1527 1528 1529 void Assembler::cmpl(Register dst, Address src) { 1530 InstructionMark im(this); 1531 prefix(src, dst); 1532 emit_int8((unsigned char)0x3B); 1533 emit_operand(dst, src); 1534 } 1535 1536 void Assembler::cmpw(Address dst, int imm16) { 1537 InstructionMark im(this); 1538 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); 1539 emit_int8(0x66); 1540 emit_int8((unsigned char)0x81); 1541 emit_operand(rdi, dst, 2); 1542 emit_int16(imm16); 1543 } 1544 1545 // The 32-bit cmpxchg compares the value at adr with the contents of rax, 1546 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1547 // The ZF is set if the compared values were equal, and cleared otherwise. 1548 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg 1549 InstructionMark im(this); 1550 prefix(adr, reg); 1551 emit_int8(0x0F); 1552 emit_int8((unsigned char)0xB1); 1553 emit_operand(reg, adr); 1554 } 1555 1556 // The 8-bit cmpxchg compares the value at adr with the contents of rax, 1557 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1558 // The ZF is set if the compared values were equal, and cleared otherwise. 1559 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg 1560 InstructionMark im(this); 1561 prefix(adr, reg, true); 1562 emit_int8(0x0F); 1563 emit_int8((unsigned char)0xB0); 1564 emit_operand(reg, adr); 1565 } 1566 1567 void Assembler::comisd(XMMRegister dst, Address src) { 1568 // NOTE: dbx seems to decode this as comiss even though the 1569 // 0x66 is there. 
Strangely ucomisd comes out correct 1570 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1571 if (VM_Version::supports_evex()) { 1572 tuple_type = EVEX_T1S; 1573 input_size_in_bits = EVEX_64bit; 1574 emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true); 1575 } else { 1576 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66); 1577 } 1578 } 1579 1580 void Assembler::comisd(XMMRegister dst, XMMRegister src) { 1581 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1582 if (VM_Version::supports_evex()) { 1583 emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true); 1584 } else { 1585 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66); 1586 } 1587 } 1588 1589 void Assembler::comiss(XMMRegister dst, Address src) { 1590 if (VM_Version::supports_evex()) { 1591 tuple_type = EVEX_T1S; 1592 input_size_in_bits = EVEX_32bit; 1593 } 1594 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1595 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true); 1596 } 1597 1598 void Assembler::comiss(XMMRegister dst, XMMRegister src) { 1599 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1600 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true); 1601 } 1602 1603 void Assembler::cpuid() { 1604 emit_int8(0x0F); 1605 emit_int8((unsigned char)0xA2); 1606 } 1607 1608 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { 1609 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1610 emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3); 1611 } 1612 1613 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { 1614 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1615 emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE); 1616 } 1617 1618 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { 1619 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1620 if (VM_Version::supports_evex()) { 1621 emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2); 1622 } else { 1623 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2); 1624 } 1625 } 1626 1627 void Assembler::cvtsd2ss(XMMRegister dst, Address src) { 1628 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1629 if (VM_Version::supports_evex()) { 1630 tuple_type = EVEX_T1F; 1631 input_size_in_bits = EVEX_64bit; 1632 emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2); 1633 } else { 1634 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2); 1635 } 1636 } 1637 1638 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { 1639 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1640 int encode = 0; 1641 if (VM_Version::supports_evex()) { 1642 encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true); 1643 } else { 1644 encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, false); 1645 } 1646 emit_int8(0x2A); 1647 emit_int8((unsigned char)(0xC0 | encode)); 1648 } 1649 1650 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { 1651 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1652 if (VM_Version::supports_evex()) { 1653 tuple_type = EVEX_T1S; 1654 input_size_in_bits = EVEX_32bit; 1655 emit_simd_arith_q(0x2A, dst, src, VEX_SIMD_F2, true); 1656 } else { 1657 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2); 1658 } 1659 } 1660 1661 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { 1662 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1663 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, true); 1664 emit_int8(0x2A); 1665 emit_int8((unsigned char)(0xC0 | encode)); 1666 } 1667 1668 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { 1669 if (VM_Version::supports_evex()) { 1670 tuple_type = EVEX_T1S; 1671 input_size_in_bits =
EVEX_32bit; 1672 } 1673 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1674 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3, true); 1675 } 1676 1677 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { 1678 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1679 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3); 1680 } 1681 1682 void Assembler::cvtss2sd(XMMRegister dst, Address src) { 1683 if (VM_Version::supports_evex()) { 1684 tuple_type = EVEX_T1S; 1685 input_size_in_bits = EVEX_32bit; 1686 } 1687 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1688 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3); 1689 } 1690 1691 1692 void Assembler::cvttsd2sil(Register dst, XMMRegister src) { 1693 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1694 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true); 1695 emit_int8(0x2C); 1696 emit_int8((unsigned char)(0xC0 | encode)); 1697 } 1698 1699 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 1700 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1701 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true); 1702 emit_int8(0x2C); 1703 emit_int8((unsigned char)(0xC0 | encode)); 1704 } 1705 1706 void Assembler::decl(Address dst) { 1707 // Don't use it directly. Use MacroAssembler::decrement() instead. 1708 InstructionMark im(this); 1709 prefix(dst); 1710 emit_int8((unsigned char)0xFF); 1711 emit_operand(rcx, dst); 1712 } 1713 1714 void Assembler::divsd(XMMRegister dst, Address src) { 1715 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1716 if (VM_Version::supports_evex()) { 1717 tuple_type = EVEX_T1S; 1718 input_size_in_bits = EVEX_64bit; 1719 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2); 1720 } else { 1721 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2); 1722 } 1723 } 1724 1725 void Assembler::divsd(XMMRegister dst, XMMRegister src) { 1726 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1727 if (VM_Version::supports_evex()) { 1728 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2); 1729 } else { 1730 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2); 1731 } 1732 } 1733 1734 void Assembler::divss(XMMRegister dst, Address src) { 1735 if (VM_Version::supports_evex()) { 1736 tuple_type = EVEX_T1S; 1737 input_size_in_bits = EVEX_32bit; 1738 } 1739 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1740 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3); 1741 } 1742 1743 void Assembler::divss(XMMRegister dst, XMMRegister src) { 1744 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1745 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3); 1746 } 1747 1748 void Assembler::emms() { 1749 NOT_LP64(assert(VM_Version::supports_mmx(), "")); 1750 emit_int8(0x0F); 1751 emit_int8(0x77); 1752 } 1753 1754 void Assembler::hlt() { 1755 emit_int8((unsigned char)0xF4); 1756 } 1757 1758 void Assembler::idivl(Register src) { 1759 int encode = prefix_and_encode(src->encoding()); 1760 emit_int8((unsigned char)0xF7); 1761 emit_int8((unsigned char)(0xF8 | encode)); 1762 } 1763 1764 void Assembler::divl(Register src) { // Unsigned 1765 int encode = prefix_and_encode(src->encoding()); 1766 emit_int8((unsigned char)0xF7); 1767 emit_int8((unsigned char)(0xF0 | encode)); 1768 } 1769 1770 void Assembler::imull(Register dst, Register src) { 1771 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1772 emit_int8(0x0F); 1773 emit_int8((unsigned char)0xAF); 1774 emit_int8((unsigned char)(0xC0 | encode)); 1775 } 1776 1777 1778 void Assembler::imull(Register dst, Register src, int value) { 1779 int encode = 
prefix_and_encode(dst->encoding(), src->encoding()); 1780 if (is8bit(value)) { 1781 emit_int8(0x6B); 1782 emit_int8((unsigned char)(0xC0 | encode)); 1783 emit_int8(value & 0xFF); 1784 } else { 1785 emit_int8(0x69); 1786 emit_int8((unsigned char)(0xC0 | encode)); 1787 emit_int32(value); 1788 } 1789 } 1790 1791 void Assembler::imull(Register dst, Address src) { 1792 InstructionMark im(this); 1793 prefix(src, dst); 1794 emit_int8(0x0F); 1795 emit_int8((unsigned char) 0xAF); 1796 emit_operand(dst, src); 1797 } 1798 1799 1800 void Assembler::incl(Address dst) { 1801 // Don't use it directly. Use MacroAssembler::increment() instead. 1802 InstructionMark im(this); 1803 prefix(dst); 1804 emit_int8((unsigned char)0xFF); 1805 emit_operand(rax, dst); 1806 } 1807 1808 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) { 1809 InstructionMark im(this); 1810 assert((0 <= cc) && (cc < 16), "illegal cc"); 1811 if (L.is_bound()) { 1812 address dst = target(L); 1813 assert(dst != NULL, "jcc most probably wrong"); 1814 1815 const int short_size = 2; 1816 const int long_size = 6; 1817 intptr_t offs = (intptr_t)dst - (intptr_t)pc(); 1818 if (maybe_short && is8bit(offs - short_size)) { 1819 // 0111 tttn #8-bit disp 1820 emit_int8(0x70 | cc); 1821 emit_int8((offs - short_size) & 0xFF); 1822 } else { 1823 // 0000 1111 1000 tttn #32-bit disp 1824 assert(is_simm32(offs - long_size), 1825 "must be 32bit offset (call4)"); 1826 emit_int8(0x0F); 1827 emit_int8((unsigned char)(0x80 | cc)); 1828 emit_int32(offs - long_size); 1829 } 1830 } else { 1831 // Note: could eliminate cond. jumps to this jump if condition 1832 // is the same; however, that seems to be a rather unlikely case. 1833 // Note: use jccb() if label to be bound is very close to get 1834 // an 8-bit displacement 1835 L.add_patch_at(code(), locator()); 1836 emit_int8(0x0F); 1837 emit_int8((unsigned char)(0x80 | cc)); 1838 emit_int32(0); 1839 } 1840 } 1841 1842 void Assembler::jccb(Condition cc, Label& L) { 1843 if (L.is_bound()) { 1844 const int short_size = 2; 1845 address entry = target(L); 1846 #ifdef ASSERT 1847 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 1848 intptr_t delta = short_branch_delta(); 1849 if (delta != 0) { 1850 dist += (dist < 0 ? (-delta) :delta); 1851 } 1852 assert(is8bit(dist), "Displacement too large for a short jmp"); 1853 #endif 1854 intptr_t offs = (intptr_t)entry - (intptr_t)pc(); 1855 // 0111 tttn #8-bit disp 1856 emit_int8(0x70 | cc); 1857 emit_int8((offs - short_size) & 0xFF); 1858 } else { 1859 InstructionMark im(this); 1860 L.add_patch_at(code(), locator()); 1861 emit_int8(0x70 | cc); 1862 emit_int8(0); 1863 } 1864 } 1865 1866 void Assembler::jmp(Address adr) { 1867 InstructionMark im(this); 1868 prefix(adr); 1869 emit_int8((unsigned char)0xFF); 1870 emit_operand(rsp, adr); 1871 } 1872 1873 void Assembler::jmp(Label& L, bool maybe_short) { 1874 if (L.is_bound()) { 1875 address entry = target(L); 1876 assert(entry != NULL, "jmp most probably wrong"); 1877 InstructionMark im(this); 1878 const int short_size = 2; 1879 const int long_size = 5; 1880 intptr_t offs = entry - pc(); 1881 if (maybe_short && is8bit(offs - short_size)) { 1882 emit_int8((unsigned char)0xEB); 1883 emit_int8((offs - short_size) & 0xFF); 1884 } else { 1885 emit_int8((unsigned char)0xE9); 1886 emit_int32(offs - long_size); 1887 } 1888 } else { 1889 // By default, forward jumps are always 32-bit displacements, since 1890 // we can't yet know where the label will be bound.
If you're sure that 1891 // the forward jump will not run beyond 256 bytes, use jmpb to 1892 // force an 8-bit displacement. 1893 InstructionMark im(this); 1894 L.add_patch_at(code(), locator()); 1895 emit_int8((unsigned char)0xE9); 1896 emit_int32(0); 1897 } 1898 } 1899 1900 void Assembler::jmp(Register entry) { 1901 int encode = prefix_and_encode(entry->encoding()); 1902 emit_int8((unsigned char)0xFF); 1903 emit_int8((unsigned char)(0xE0 | encode)); 1904 } 1905 1906 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { 1907 InstructionMark im(this); 1908 emit_int8((unsigned char)0xE9); 1909 assert(dest != NULL, "must have a target"); 1910 intptr_t disp = dest - (pc() + sizeof(int32_t)); 1911 assert(is_simm32(disp), "must be 32bit offset (jmp)"); 1912 emit_data(disp, rspec.reloc(), call32_operand); 1913 } 1914 1915 void Assembler::jmpb(Label& L) { 1916 if (L.is_bound()) { 1917 const int short_size = 2; 1918 address entry = target(L); 1919 assert(entry != NULL, "jmp most probably wrong"); 1920 #ifdef ASSERT 1921 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 1922 intptr_t delta = short_branch_delta(); 1923 if (delta != 0) { 1924 dist += (dist < 0 ? (-delta) :delta); 1925 } 1926 assert(is8bit(dist), "Dispacement too large for a short jmp"); 1927 #endif 1928 intptr_t offs = entry - pc(); 1929 emit_int8((unsigned char)0xEB); 1930 emit_int8((offs - short_size) & 0xFF); 1931 } else { 1932 InstructionMark im(this); 1933 L.add_patch_at(code(), locator()); 1934 emit_int8((unsigned char)0xEB); 1935 emit_int8(0); 1936 } 1937 } 1938 1939 void Assembler::ldmxcsr( Address src) { 1940 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1941 InstructionMark im(this); 1942 prefix(src); 1943 emit_int8(0x0F); 1944 emit_int8((unsigned char)0xAE); 1945 emit_operand(as_Register(2), src); 1946 } 1947 1948 void Assembler::leal(Register dst, Address src) { 1949 InstructionMark im(this); 1950 #ifdef _LP64 1951 emit_int8(0x67); // addr32 1952 prefix(src, dst); 1953 #endif // LP64 1954 emit_int8((unsigned char)0x8D); 1955 emit_operand(dst, src); 1956 } 1957 1958 void Assembler::lfence() { 1959 emit_int8(0x0F); 1960 emit_int8((unsigned char)0xAE); 1961 emit_int8((unsigned char)0xE8); 1962 } 1963 1964 void Assembler::lock() { 1965 emit_int8((unsigned char)0xF0); 1966 } 1967 1968 void Assembler::lzcntl(Register dst, Register src) { 1969 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 1970 emit_int8((unsigned char)0xF3); 1971 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1972 emit_int8(0x0F); 1973 emit_int8((unsigned char)0xBD); 1974 emit_int8((unsigned char)(0xC0 | encode)); 1975 } 1976 1977 // Emit mfence instruction 1978 void Assembler::mfence() { 1979 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 1980 emit_int8(0x0F); 1981 emit_int8((unsigned char)0xAE); 1982 emit_int8((unsigned char)0xF0); 1983 } 1984 1985 void Assembler::mov(Register dst, Register src) { 1986 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 1987 } 1988 1989 void Assembler::movapd(XMMRegister dst, XMMRegister src) { 1990 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1991 if (VM_Version::supports_evex()) { 1992 emit_simd_arith_nonds_q(0x28, dst, src, VEX_SIMD_66, true); 1993 } else { 1994 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66); 1995 } 1996 } 1997 1998 void Assembler::movaps(XMMRegister dst, XMMRegister src) { 1999 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2000 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE); 2001 } 2002 2003 
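// Worked example (comment only, nothing is emitted here) for the jcc()/jccb() emitters
// above, assuming a label that is already bound 16 bytes past the start of the branch:
//   jccb(Assembler::zero, L)  emits  0x74 0x0E          (8-bit disp, counted from the end
//                                                        of the 2-byte instruction)
//   jcc(Assembler::zero, L)   emits  0x0F 0x84 <rel32>  (6-byte form; also always used
//                                                        while the label is still unbound)
// In debug builds jccb() asserts that the distance really fits in 8 bits.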
void Assembler::movlhps(XMMRegister dst, XMMRegister src) { 2004 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2005 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, true, VEX_OPCODE_0F, 2006 false, AVX_128bit); 2007 emit_int8(0x16); 2008 emit_int8((unsigned char)(0xC0 | encode)); 2009 } 2010 2011 void Assembler::movb(Register dst, Address src) { 2012 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 2013 InstructionMark im(this); 2014 prefix(src, dst, true); 2015 emit_int8((unsigned char)0x8A); 2016 emit_operand(dst, src); 2017 } 2018 2019 void Assembler::kmovq(KRegister dst, KRegister src) { 2020 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2021 int encode = kreg_prefix_and_encode(dst, knoreg, src, VEX_SIMD_NONE, 2022 true, VEX_OPCODE_0F, true); 2023 emit_int8((unsigned char)0x90); 2024 emit_int8((unsigned char)(0xC0 | encode)); 2025 } 2026 2027 void Assembler::kmovq(KRegister dst, Address src) { 2028 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2029 int dst_enc = dst->encoding(); 2030 int nds_enc = 0; 2031 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_NONE, 2032 VEX_OPCODE_0F, true, AVX_128bit, true, true); 2033 emit_int8((unsigned char)0x90); 2034 emit_operand((Register)dst, src); 2035 } 2036 2037 void Assembler::kmovq(Address dst, KRegister src) { 2038 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2039 int src_enc = src->encoding(); 2040 int nds_enc = 0; 2041 vex_prefix(dst, nds_enc, src_enc, VEX_SIMD_NONE, 2042 VEX_OPCODE_0F, true, AVX_128bit, true, true); 2043 emit_int8((unsigned char)0x90); 2044 emit_operand((Register)src, dst); 2045 } 2046 2047 void Assembler::kmovql(KRegister dst, Register src) { 2048 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2049 bool supports_bw = VM_Version::supports_avx512bw(); 2050 VexSimdPrefix pre = supports_bw ? VEX_SIMD_F2 : VEX_SIMD_NONE; 2051 int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true, 2052 VEX_OPCODE_0F, supports_bw); 2053 emit_int8((unsigned char)0x92); 2054 emit_int8((unsigned char)(0xC0 | encode)); 2055 } 2056 2057 void Assembler::kmovdl(KRegister dst, Register src) { 2058 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2059 VexSimdPrefix pre = VM_Version::supports_avx512bw() ? 
VEX_SIMD_F2 : VEX_SIMD_NONE; 2060 int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true, VEX_OPCODE_0F, false); 2061 emit_int8((unsigned char)0x92); 2062 emit_int8((unsigned char)(0xC0 | encode)); 2063 } 2064 2065 void Assembler::movb(Address dst, int imm8) { 2066 InstructionMark im(this); 2067 prefix(dst); 2068 emit_int8((unsigned char)0xC6); 2069 emit_operand(rax, dst, 1); 2070 emit_int8(imm8); 2071 } 2072 2073 2074 void Assembler::movb(Address dst, Register src) { 2075 assert(src->has_byte_register(), "must have byte register"); 2076 InstructionMark im(this); 2077 prefix(dst, src, true); 2078 emit_int8((unsigned char)0x88); 2079 emit_operand(src, dst); 2080 } 2081 2082 void Assembler::movdl(XMMRegister dst, Register src) { 2083 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2084 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66, true); 2085 emit_int8(0x6E); 2086 emit_int8((unsigned char)(0xC0 | encode)); 2087 } 2088 2089 void Assembler::movdl(Register dst, XMMRegister src) { 2090 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2091 // swap src/dst to get correct prefix 2092 int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66, true); 2093 emit_int8(0x7E); 2094 emit_int8((unsigned char)(0xC0 | encode)); 2095 } 2096 2097 void Assembler::movdl(XMMRegister dst, Address src) { 2098 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2099 if (VM_Version::supports_evex()) { 2100 tuple_type = EVEX_T1S; 2101 input_size_in_bits = EVEX_32bit; 2102 } 2103 InstructionMark im(this); 2104 simd_prefix(dst, src, VEX_SIMD_66, true, VEX_OPCODE_0F); 2105 emit_int8(0x6E); 2106 emit_operand(dst, src); 2107 } 2108 2109 void Assembler::movdl(Address dst, XMMRegister src) { 2110 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2111 if (VM_Version::supports_evex()) { 2112 tuple_type = EVEX_T1S; 2113 input_size_in_bits = EVEX_32bit; 2114 } 2115 InstructionMark im(this); 2116 simd_prefix(dst, src, VEX_SIMD_66, true); 2117 emit_int8(0x7E); 2118 emit_operand(src, dst); 2119 } 2120 2121 void Assembler::movdqa(XMMRegister dst, XMMRegister src) { 2122 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2123 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 2124 } 2125 2126 void Assembler::movdqa(XMMRegister dst, Address src) { 2127 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2128 if (VM_Version::supports_evex()) { 2129 tuple_type = EVEX_FVM; 2130 } 2131 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 2132 } 2133 2134 void Assembler::movdqu(XMMRegister dst, Address src) { 2135 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2136 if (VM_Version::supports_evex()) { 2137 tuple_type = EVEX_FVM; 2138 } 2139 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); 2140 } 2141 2142 void Assembler::movdqu(XMMRegister dst, XMMRegister src) { 2143 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2144 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); 2145 } 2146 2147 void Assembler::movdqu(Address dst, XMMRegister src) { 2148 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2149 if (VM_Version::supports_evex()) { 2150 tuple_type = EVEX_FVM; 2151 } 2152 InstructionMark im(this); 2153 simd_prefix(dst, src, VEX_SIMD_F3, false); 2154 emit_int8(0x7F); 2155 emit_operand(src, dst); 2156 } 2157 2158 // Move Unaligned 256bit Vector 2159 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2160 assert(UseAVX > 0, ""); 2161 if (VM_Version::supports_evex()) { 2162 tuple_type = EVEX_FVM; 2163 } 2164 int vector_len = AVX_256bit; 2165 int encode = vex_prefix_and_encode(dst, xnoreg, src, 
VEX_SIMD_F3, vector_len); 2166 emit_int8(0x6F); 2167 emit_int8((unsigned char)(0xC0 | encode)); 2168 } 2169 2170 void Assembler::vmovdqu(XMMRegister dst, Address src) { 2171 assert(UseAVX > 0, ""); 2172 if (VM_Version::supports_evex()) { 2173 tuple_type = EVEX_FVM; 2174 } 2175 InstructionMark im(this); 2176 int vector_len = AVX_256bit; 2177 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2178 emit_int8(0x6F); 2179 emit_operand(dst, src); 2180 } 2181 2182 void Assembler::vmovdqu(Address dst, XMMRegister src) { 2183 assert(UseAVX > 0, ""); 2184 if (VM_Version::supports_evex()) { 2185 tuple_type = EVEX_FVM; 2186 } 2187 InstructionMark im(this); 2188 int vector_len = AVX_256bit; 2189 // swap src<->dst for encoding 2190 assert(src != xnoreg, "sanity"); 2191 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2192 emit_int8(0x7F); 2193 emit_operand(src, dst); 2194 } 2195 2196 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64) 2197 void Assembler::evmovdqu(XMMRegister dst, XMMRegister src, int vector_len) { 2198 assert(UseAVX > 0, ""); 2199 int src_enc = src->encoding(); 2200 int dst_enc = dst->encoding(); 2201 int encode = vex_prefix_and_encode(dst_enc, 0, src_enc, VEX_SIMD_F3, VEX_OPCODE_0F, 2202 true, vector_len, false, false); 2203 emit_int8(0x6F); 2204 emit_int8((unsigned char)(0xC0 | encode)); 2205 } 2206 2207 void Assembler::evmovdqu(XMMRegister dst, Address src, int vector_len) { 2208 assert(UseAVX > 0, ""); 2209 InstructionMark im(this); 2210 if (VM_Version::supports_evex()) { 2211 tuple_type = EVEX_FVM; 2212 vex_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2213 } else { 2214 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2215 } 2216 emit_int8(0x6F); 2217 emit_operand(dst, src); 2218 } 2219 2220 void Assembler::evmovdqu(Address dst, XMMRegister src, int vector_len) { 2221 assert(UseAVX > 0, ""); 2222 InstructionMark im(this); 2223 assert(src != xnoreg, "sanity"); 2224 if (VM_Version::supports_evex()) { 2225 tuple_type = EVEX_FVM; 2226 // swap src<->dst for encoding 2227 vex_prefix_q(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2228 } else { 2229 // swap src<->dst for encoding 2230 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2231 } 2232 emit_int8(0x7F); 2233 emit_operand(src, dst); 2234 } 2235 2236 // Uses zero extension on 64bit 2237 2238 void Assembler::movl(Register dst, int32_t imm32) { 2239 int encode = prefix_and_encode(dst->encoding()); 2240 emit_int8((unsigned char)(0xB8 | encode)); 2241 emit_int32(imm32); 2242 } 2243 2244 void Assembler::movl(Register dst, Register src) { 2245 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2246 emit_int8((unsigned char)0x8B); 2247 emit_int8((unsigned char)(0xC0 | encode)); 2248 } 2249 2250 void Assembler::movl(Register dst, Address src) { 2251 InstructionMark im(this); 2252 prefix(src, dst); 2253 emit_int8((unsigned char)0x8B); 2254 emit_operand(dst, src); 2255 } 2256 2257 void Assembler::movl(Address dst, int32_t imm32) { 2258 InstructionMark im(this); 2259 prefix(dst); 2260 emit_int8((unsigned char)0xC7); 2261 emit_operand(rax, dst, 4); 2262 emit_int32(imm32); 2263 } 2264 2265 void Assembler::movl(Address dst, Register src) { 2266 InstructionMark im(this); 2267 prefix(dst, src); 2268 emit_int8((unsigned char)0x89); 2269 emit_operand(src, dst); 2270 } 2271 2272 // New cpus require to use movsd and movss to avoid partial register stall 2273 // when loading from memory. But for old Opteron use movlpd instead of movsd. 
2274 // The selection is done in MacroAssembler::movdbl() and movflt(). 2275 void Assembler::movlpd(XMMRegister dst, Address src) { 2276 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2277 if (VM_Version::supports_evex()) { 2278 tuple_type = EVEX_T1S; 2279 input_size_in_bits = EVEX_32bit; 2280 } 2281 emit_simd_arith(0x12, dst, src, VEX_SIMD_66, true); 2282 } 2283 2284 void Assembler::movq( MMXRegister dst, Address src ) { 2285 assert( VM_Version::supports_mmx(), "" ); 2286 emit_int8(0x0F); 2287 emit_int8(0x6F); 2288 emit_operand(dst, src); 2289 } 2290 2291 void Assembler::movq( Address dst, MMXRegister src ) { 2292 assert( VM_Version::supports_mmx(), "" ); 2293 emit_int8(0x0F); 2294 emit_int8(0x7F); 2295 // workaround gcc (3.2.1-7a) bug 2296 // In that version of gcc with only an emit_operand(MMX, Address) 2297 // gcc will tail jump and try and reverse the parameters completely 2298 // obliterating dst in the process. By having a version available 2299 // that doesn't need to swap the args at the tail jump the bug is 2300 // avoided. 2301 emit_operand(dst, src); 2302 } 2303 2304 void Assembler::movq(XMMRegister dst, Address src) { 2305 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2306 InstructionMark im(this); 2307 if (VM_Version::supports_evex()) { 2308 tuple_type = EVEX_T1S; 2309 input_size_in_bits = EVEX_64bit; 2310 simd_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, true); 2311 } else { 2312 simd_prefix(dst, src, VEX_SIMD_F3, true, VEX_OPCODE_0F); 2313 } 2314 emit_int8(0x7E); 2315 emit_operand(dst, src); 2316 } 2317 2318 void Assembler::movq(Address dst, XMMRegister src) { 2319 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2320 InstructionMark im(this); 2321 if (VM_Version::supports_evex()) { 2322 tuple_type = EVEX_T1S; 2323 input_size_in_bits = EVEX_64bit; 2324 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, true, 2325 VEX_OPCODE_0F, true, AVX_128bit); 2326 } else { 2327 simd_prefix(dst, src, VEX_SIMD_66, true); 2328 } 2329 emit_int8((unsigned char)0xD6); 2330 emit_operand(src, dst); 2331 } 2332 2333 void Assembler::movsbl(Register dst, Address src) { // movsxb 2334 InstructionMark im(this); 2335 prefix(src, dst); 2336 emit_int8(0x0F); 2337 emit_int8((unsigned char)0xBE); 2338 emit_operand(dst, src); 2339 } 2340 2341 void Assembler::movsbl(Register dst, Register src) { // movsxb 2342 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2343 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); 2344 emit_int8(0x0F); 2345 emit_int8((unsigned char)0xBE); 2346 emit_int8((unsigned char)(0xC0 | encode)); 2347 } 2348 2349 void Assembler::movsd(XMMRegister dst, XMMRegister src) { 2350 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2351 if (VM_Version::supports_evex()) { 2352 emit_simd_arith_q(0x10, dst, src, VEX_SIMD_F2, true); 2353 } else { 2354 emit_simd_arith(0x10, dst, src, VEX_SIMD_F2); 2355 } 2356 } 2357 2358 void Assembler::movsd(XMMRegister dst, Address src) { 2359 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2360 if (VM_Version::supports_evex()) { 2361 tuple_type = EVEX_T1S; 2362 input_size_in_bits = EVEX_64bit; 2363 emit_simd_arith_nonds_q(0x10, dst, src, VEX_SIMD_F2, true); 2364 } else { 2365 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2); 2366 } 2367 } 2368 2369 void Assembler::movsd(Address dst, XMMRegister src) { 2370 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2371 InstructionMark im(this); 2372 if (VM_Version::supports_evex()) { 2373 tuple_type = EVEX_T1S; 2374 input_size_in_bits = EVEX_64bit; 2375 simd_prefix_q(src, 
xnoreg, dst, VEX_SIMD_F2); 2376 } else { 2377 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, false); 2378 } 2379 emit_int8(0x11); 2380 emit_operand(src, dst); 2381 } 2382 2383 void Assembler::movss(XMMRegister dst, XMMRegister src) { 2384 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2385 emit_simd_arith(0x10, dst, src, VEX_SIMD_F3, true); 2386 } 2387 2388 void Assembler::movss(XMMRegister dst, Address src) { 2389 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2390 if (VM_Version::supports_evex()) { 2391 tuple_type = EVEX_T1S; 2392 input_size_in_bits = EVEX_32bit; 2393 } 2394 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3, true); 2395 } 2396 2397 void Assembler::movss(Address dst, XMMRegister src) { 2398 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2399 if (VM_Version::supports_evex()) { 2400 tuple_type = EVEX_T1S; 2401 input_size_in_bits = EVEX_32bit; 2402 } 2403 InstructionMark im(this); 2404 simd_prefix(dst, src, VEX_SIMD_F3, false); 2405 emit_int8(0x11); 2406 emit_operand(src, dst); 2407 } 2408 2409 void Assembler::movswl(Register dst, Address src) { // movsxw 2410 InstructionMark im(this); 2411 prefix(src, dst); 2412 emit_int8(0x0F); 2413 emit_int8((unsigned char)0xBF); 2414 emit_operand(dst, src); 2415 } 2416 2417 void Assembler::movswl(Register dst, Register src) { // movsxw 2418 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2419 emit_int8(0x0F); 2420 emit_int8((unsigned char)0xBF); 2421 emit_int8((unsigned char)(0xC0 | encode)); 2422 } 2423 2424 void Assembler::movw(Address dst, int imm16) { 2425 InstructionMark im(this); 2426 2427 emit_int8(0x66); // switch to 16-bit mode 2428 prefix(dst); 2429 emit_int8((unsigned char)0xC7); 2430 emit_operand(rax, dst, 2); 2431 emit_int16(imm16); 2432 } 2433 2434 void Assembler::movw(Register dst, Address src) { 2435 InstructionMark im(this); 2436 emit_int8(0x66); 2437 prefix(src, dst); 2438 emit_int8((unsigned char)0x8B); 2439 emit_operand(dst, src); 2440 } 2441 2442 void Assembler::movw(Address dst, Register src) { 2443 InstructionMark im(this); 2444 emit_int8(0x66); 2445 prefix(dst, src); 2446 emit_int8((unsigned char)0x89); 2447 emit_operand(src, dst); 2448 } 2449 2450 void Assembler::movzbl(Register dst, Address src) { // movzxb 2451 InstructionMark im(this); 2452 prefix(src, dst); 2453 emit_int8(0x0F); 2454 emit_int8((unsigned char)0xB6); 2455 emit_operand(dst, src); 2456 } 2457 2458 void Assembler::movzbl(Register dst, Register src) { // movzxb 2459 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2460 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); 2461 emit_int8(0x0F); 2462 emit_int8((unsigned char)0xB6); 2463 emit_int8(0xC0 | encode); 2464 } 2465 2466 void Assembler::movzwl(Register dst, Address src) { // movzxw 2467 InstructionMark im(this); 2468 prefix(src, dst); 2469 emit_int8(0x0F); 2470 emit_int8((unsigned char)0xB7); 2471 emit_operand(dst, src); 2472 } 2473 2474 void Assembler::movzwl(Register dst, Register src) { // movzxw 2475 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2476 emit_int8(0x0F); 2477 emit_int8((unsigned char)0xB7); 2478 emit_int8(0xC0 | encode); 2479 } 2480 2481 void Assembler::mull(Address src) { 2482 InstructionMark im(this); 2483 prefix(src); 2484 emit_int8((unsigned char)0xF7); 2485 emit_operand(rsp, src); 2486 } 2487 2488 void Assembler::mull(Register src) { 2489 int encode = prefix_and_encode(src->encoding()); 2490 emit_int8((unsigned char)0xF7); 2491 emit_int8((unsigned char)(0xE0 | encode)); 2492 } 2493 2494 
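// The scalar double-precision memory forms that follow (mulsd below, and the matching
// divsd/sqrtsd/subsd forms elsewhere in this file) share one EVEX pattern:
// tuple_type = EVEX_T1S together with input_size_in_bits = EVEX_64bit selects the 8-byte
// entry of the disp8 compression table at the top of this file, so a displacement that is
// a multiple of 8 can be encoded as a single compressed disp8 byte.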
void Assembler::mulsd(XMMRegister dst, Address src) { 2495 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2496 if (VM_Version::supports_evex()) { 2497 tuple_type = EVEX_T1S; 2498 input_size_in_bits = EVEX_64bit; 2499 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2); 2500 } else { 2501 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2); 2502 } 2503 } 2504 2505 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 2506 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2507 if (VM_Version::supports_evex()) { 2508 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2); 2509 } else { 2510 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2); 2511 } 2512 } 2513 2514 void Assembler::mulss(XMMRegister dst, Address src) { 2515 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2516 if (VM_Version::supports_evex()) { 2517 tuple_type = EVEX_T1S; 2518 input_size_in_bits = EVEX_32bit; 2519 } 2520 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3); 2521 } 2522 2523 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 2524 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2525 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3); 2526 } 2527 2528 void Assembler::negl(Register dst) { 2529 int encode = prefix_and_encode(dst->encoding()); 2530 emit_int8((unsigned char)0xF7); 2531 emit_int8((unsigned char)(0xD8 | encode)); 2532 } 2533 2534 void Assembler::nop(int i) { 2535 #ifdef ASSERT 2536 assert(i > 0, " "); 2537 // The fancy nops aren't currently recognized by debuggers making it a 2538 // pain to disassemble code while debugging. If asserts are on clearly 2539 // speed is not an issue so simply use the single byte traditional nop 2540 // to do alignment. 2541 2542 for (; i > 0 ; i--) emit_int8((unsigned char)0x90); 2543 return; 2544 2545 #endif // ASSERT 2546 2547 if (UseAddressNop && VM_Version::is_intel()) { 2548 // 2549 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel 2550 // 1: 0x90 2551 // 2: 0x66 0x90 2552 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2553 // 4: 0x0F 0x1F 0x40 0x00 2554 // 5: 0x0F 0x1F 0x44 0x00 0x00 2555 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2556 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2557 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2558 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2559 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2560 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2561 2562 // The rest coding is Intel specific - don't use consecutive address nops 2563 2564 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2565 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2566 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2567 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2568 2569 while(i >= 15) { 2570 // For Intel don't generate consecutive addess nops (mix with regular nops) 2571 i -= 15; 2572 emit_int8(0x66); // size prefix 2573 emit_int8(0x66); // size prefix 2574 emit_int8(0x66); // size prefix 2575 addr_nop_8(); 2576 emit_int8(0x66); // size prefix 2577 emit_int8(0x66); // size prefix 2578 emit_int8(0x66); // size prefix 2579 emit_int8((unsigned char)0x90); 2580 // nop 2581 } 2582 switch (i) { 2583 case 14: 2584 emit_int8(0x66); // size prefix 2585 case 13: 2586 emit_int8(0x66); // size prefix 2587 case 12: 2588 addr_nop_8(); 2589 emit_int8(0x66); // size prefix 2590 emit_int8(0x66); // size prefix 2591 emit_int8(0x66); // size prefix 2592 emit_int8((unsigned char)0x90); 2593 // nop 2594 break; 2595 case 11: 2596 
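// Cases 11, 10 and 9 fall through, each contributing one 0x66 size prefix in front of
// the 8-byte address nop emitted at case 8.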
emit_int8(0x66); // size prefix 2597 case 10: 2598 emit_int8(0x66); // size prefix 2599 case 9: 2600 emit_int8(0x66); // size prefix 2601 case 8: 2602 addr_nop_8(); 2603 break; 2604 case 7: 2605 addr_nop_7(); 2606 break; 2607 case 6: 2608 emit_int8(0x66); // size prefix 2609 case 5: 2610 addr_nop_5(); 2611 break; 2612 case 4: 2613 addr_nop_4(); 2614 break; 2615 case 3: 2616 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 2617 emit_int8(0x66); // size prefix 2618 case 2: 2619 emit_int8(0x66); // size prefix 2620 case 1: 2621 emit_int8((unsigned char)0x90); 2622 // nop 2623 break; 2624 default: 2625 assert(i == 0, " "); 2626 } 2627 return; 2628 } 2629 if (UseAddressNop && VM_Version::is_amd()) { 2630 // 2631 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. 2632 // 1: 0x90 2633 // 2: 0x66 0x90 2634 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2635 // 4: 0x0F 0x1F 0x40 0x00 2636 // 5: 0x0F 0x1F 0x44 0x00 0x00 2637 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2638 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2639 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2640 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2641 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2642 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2643 2644 // The rest coding is AMD specific - use consecutive address nops 2645 2646 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2647 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2648 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2649 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2650 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2651 // Size prefixes (0x66) are added for larger sizes 2652 2653 while(i >= 22) { 2654 i -= 11; 2655 emit_int8(0x66); // size prefix 2656 emit_int8(0x66); // size prefix 2657 emit_int8(0x66); // size prefix 2658 addr_nop_8(); 2659 } 2660 // Generate first nop for size between 21-12 2661 switch (i) { 2662 case 21: 2663 i -= 1; 2664 emit_int8(0x66); // size prefix 2665 case 20: 2666 case 19: 2667 i -= 1; 2668 emit_int8(0x66); // size prefix 2669 case 18: 2670 case 17: 2671 i -= 1; 2672 emit_int8(0x66); // size prefix 2673 case 16: 2674 case 15: 2675 i -= 8; 2676 addr_nop_8(); 2677 break; 2678 case 14: 2679 case 13: 2680 i -= 7; 2681 addr_nop_7(); 2682 break; 2683 case 12: 2684 i -= 6; 2685 emit_int8(0x66); // size prefix 2686 addr_nop_5(); 2687 break; 2688 default: 2689 assert(i < 12, " "); 2690 } 2691 2692 // Generate second nop for size between 11-1 2693 switch (i) { 2694 case 11: 2695 emit_int8(0x66); // size prefix 2696 case 10: 2697 emit_int8(0x66); // size prefix 2698 case 9: 2699 emit_int8(0x66); // size prefix 2700 case 8: 2701 addr_nop_8(); 2702 break; 2703 case 7: 2704 addr_nop_7(); 2705 break; 2706 case 6: 2707 emit_int8(0x66); // size prefix 2708 case 5: 2709 addr_nop_5(); 2710 break; 2711 case 4: 2712 addr_nop_4(); 2713 break; 2714 case 3: 2715 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 2716 emit_int8(0x66); // size prefix 2717 case 2: 2718 emit_int8(0x66); // size prefix 2719 case 1: 2720 emit_int8((unsigned char)0x90); 2721 // nop 2722 break; 2723 default: 2724 assert(i == 0, " "); 2725 } 2726 return; 2727 } 2728 2729 // Using nops with size prefixes "0x66 0x90". 
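// (Fallback path: reached when UseAddressNop is off or the CPU identifies as neither
// Intel nor AMD.)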
2730 // From AMD Optimization Guide: 2731 // 1: 0x90 2732 // 2: 0x66 0x90 2733 // 3: 0x66 0x66 0x90 2734 // 4: 0x66 0x66 0x66 0x90 2735 // 5: 0x66 0x66 0x90 0x66 0x90 2736 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 2737 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 2738 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 2739 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 2740 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 2741 // 2742 while(i > 12) { 2743 i -= 4; 2744 emit_int8(0x66); // size prefix 2745 emit_int8(0x66); 2746 emit_int8(0x66); 2747 emit_int8((unsigned char)0x90); 2748 // nop 2749 } 2750 // 1 - 12 nops 2751 if(i > 8) { 2752 if(i > 9) { 2753 i -= 1; 2754 emit_int8(0x66); 2755 } 2756 i -= 3; 2757 emit_int8(0x66); 2758 emit_int8(0x66); 2759 emit_int8((unsigned char)0x90); 2760 } 2761 // 1 - 8 nops 2762 if(i > 4) { 2763 if(i > 6) { 2764 i -= 1; 2765 emit_int8(0x66); 2766 } 2767 i -= 3; 2768 emit_int8(0x66); 2769 emit_int8(0x66); 2770 emit_int8((unsigned char)0x90); 2771 } 2772 switch (i) { 2773 case 4: 2774 emit_int8(0x66); 2775 case 3: 2776 emit_int8(0x66); 2777 case 2: 2778 emit_int8(0x66); 2779 case 1: 2780 emit_int8((unsigned char)0x90); 2781 break; 2782 default: 2783 assert(i == 0, " "); 2784 } 2785 } 2786 2787 void Assembler::notl(Register dst) { 2788 int encode = prefix_and_encode(dst->encoding()); 2789 emit_int8((unsigned char)0xF7); 2790 emit_int8((unsigned char)(0xD0 | encode)); 2791 } 2792 2793 void Assembler::orl(Address dst, int32_t imm32) { 2794 InstructionMark im(this); 2795 prefix(dst); 2796 emit_arith_operand(0x81, rcx, dst, imm32); 2797 } 2798 2799 void Assembler::orl(Register dst, int32_t imm32) { 2800 prefix(dst); 2801 emit_arith(0x81, 0xC8, dst, imm32); 2802 } 2803 2804 void Assembler::orl(Register dst, Address src) { 2805 InstructionMark im(this); 2806 prefix(src, dst); 2807 emit_int8(0x0B); 2808 emit_operand(dst, src); 2809 } 2810 2811 void Assembler::orl(Register dst, Register src) { 2812 (void) prefix_and_encode(dst->encoding(), src->encoding()); 2813 emit_arith(0x0B, 0xC0, dst, src); 2814 } 2815 2816 void Assembler::packuswb(XMMRegister dst, Address src) { 2817 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2818 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 2819 if (VM_Version::supports_evex()) { 2820 tuple_type = EVEX_FV; 2821 input_size_in_bits = EVEX_32bit; 2822 } 2823 emit_simd_arith(0x67, dst, src, VEX_SIMD_66, 2824 false, (VM_Version::supports_avx512dq() == false)); 2825 } 2826 2827 void Assembler::packuswb(XMMRegister dst, XMMRegister src) { 2828 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2829 emit_simd_arith(0x67, dst, src, VEX_SIMD_66, 2830 false, (VM_Version::supports_avx512dq() == false)); 2831 } 2832 2833 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2834 assert(UseAVX > 0, "some form of AVX must be enabled"); 2835 emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector_len, 2836 false, (VM_Version::supports_avx512dq() == false)); 2837 } 2838 2839 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 2840 assert(VM_Version::supports_avx2(), ""); 2841 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 2842 VEX_OPCODE_0F_3A, true, vector_len); 2843 emit_int8(0x00); 2844 emit_int8(0xC0 | encode); 2845 emit_int8(imm8); 2846 } 2847 2848 void Assembler::pause() { 2849 emit_int8((unsigned char)0xF3); 2850 emit_int8((unsigned char)0x90); 2851 } 2852 2853 void Assembler::pcmpestri(XMMRegister dst, Address src, int 
imm8) { 2854 assert(VM_Version::supports_sse4_2(), ""); 2855 InstructionMark im(this); 2856 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_3A, 2857 false, AVX_128bit, true); 2858 emit_int8(0x61); 2859 emit_operand(dst, src); 2860 emit_int8(imm8); 2861 } 2862 2863 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 2864 assert(VM_Version::supports_sse4_2(), ""); 2865 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 2866 VEX_OPCODE_0F_3A, false, AVX_128bit, true); 2867 emit_int8(0x61); 2868 emit_int8((unsigned char)(0xC0 | encode)); 2869 emit_int8(imm8); 2870 } 2871 2872 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { 2873 assert(VM_Version::supports_sse4_1(), ""); 2874 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2875 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2876 emit_int8(0x16); 2877 emit_int8((unsigned char)(0xC0 | encode)); 2878 emit_int8(imm8); 2879 } 2880 2881 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { 2882 assert(VM_Version::supports_sse4_1(), ""); 2883 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2884 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2885 emit_int8(0x16); 2886 emit_int8((unsigned char)(0xC0 | encode)); 2887 emit_int8(imm8); 2888 } 2889 2890 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { 2891 assert(VM_Version::supports_sse4_1(), ""); 2892 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2893 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2894 emit_int8(0x22); 2895 emit_int8((unsigned char)(0xC0 | encode)); 2896 emit_int8(imm8); 2897 } 2898 2899 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { 2900 assert(VM_Version::supports_sse4_1(), ""); 2901 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2902 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2903 emit_int8(0x22); 2904 emit_int8((unsigned char)(0xC0 | encode)); 2905 emit_int8(imm8); 2906 } 2907 2908 void Assembler::pmovzxbw(XMMRegister dst, Address src) { 2909 assert(VM_Version::supports_sse4_1(), ""); 2910 if (VM_Version::supports_evex()) { 2911 tuple_type = EVEX_HVM; 2912 } 2913 InstructionMark im(this); 2914 simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38); 2915 emit_int8(0x30); 2916 emit_operand(dst, src); 2917 } 2918 2919 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 2920 assert(VM_Version::supports_sse4_1(), ""); 2921 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38); 2922 emit_int8(0x30); 2923 emit_int8((unsigned char)(0xC0 | encode)); 2924 } 2925 2926 // generic 2927 void Assembler::pop(Register dst) { 2928 int encode = prefix_and_encode(dst->encoding()); 2929 emit_int8(0x58 | encode); 2930 } 2931 2932 void Assembler::popcntl(Register dst, Address src) { 2933 assert(VM_Version::supports_popcnt(), "must support"); 2934 InstructionMark im(this); 2935 emit_int8((unsigned char)0xF3); 2936 prefix(src, dst); 2937 emit_int8(0x0F); 2938 emit_int8((unsigned char)0xB8); 2939 emit_operand(dst, src); 2940 } 2941 2942 void Assembler::popcntl(Register dst, Register src) { 2943 assert(VM_Version::supports_popcnt(), "must support"); 2944 emit_int8((unsigned char)0xF3); 2945 
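// 0xF3 is POPCNT's mandatory prefix; it must precede any REX prefix produced by
// prefix_and_encode() below, hence the ordering here.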
int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2946 emit_int8(0x0F); 2947 emit_int8((unsigned char)0xB8); 2948 emit_int8((unsigned char)(0xC0 | encode)); 2949 } 2950 2951 void Assembler::popf() { 2952 emit_int8((unsigned char)0x9D); 2953 } 2954 2955 #ifndef _LP64 // no 32bit push/pop on amd64 2956 void Assembler::popl(Address dst) { 2957 // NOTE: this will adjust stack by 8byte on 64bits 2958 InstructionMark im(this); 2959 prefix(dst); 2960 emit_int8((unsigned char)0x8F); 2961 emit_operand(rax, dst); 2962 } 2963 #endif 2964 2965 void Assembler::prefetch_prefix(Address src) { 2966 prefix(src); 2967 emit_int8(0x0F); 2968 } 2969 2970 void Assembler::prefetchnta(Address src) { 2971 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 2972 InstructionMark im(this); 2973 prefetch_prefix(src); 2974 emit_int8(0x18); 2975 emit_operand(rax, src); // 0, src 2976 } 2977 2978 void Assembler::prefetchr(Address src) { 2979 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 2980 InstructionMark im(this); 2981 prefetch_prefix(src); 2982 emit_int8(0x0D); 2983 emit_operand(rax, src); // 0, src 2984 } 2985 2986 void Assembler::prefetcht0(Address src) { 2987 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 2988 InstructionMark im(this); 2989 prefetch_prefix(src); 2990 emit_int8(0x18); 2991 emit_operand(rcx, src); // 1, src 2992 } 2993 2994 void Assembler::prefetcht1(Address src) { 2995 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 2996 InstructionMark im(this); 2997 prefetch_prefix(src); 2998 emit_int8(0x18); 2999 emit_operand(rdx, src); // 2, src 3000 } 3001 3002 void Assembler::prefetcht2(Address src) { 3003 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3004 InstructionMark im(this); 3005 prefetch_prefix(src); 3006 emit_int8(0x18); 3007 emit_operand(rbx, src); // 3, src 3008 } 3009 3010 void Assembler::prefetchw(Address src) { 3011 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 3012 InstructionMark im(this); 3013 prefetch_prefix(src); 3014 emit_int8(0x0D); 3015 emit_operand(rcx, src); // 1, src 3016 } 3017 3018 void Assembler::prefix(Prefix p) { 3019 emit_int8(p); 3020 } 3021 3022 void Assembler::pshufb(XMMRegister dst, XMMRegister src) { 3023 assert(VM_Version::supports_ssse3(), ""); 3024 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38, 3025 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3026 emit_int8(0x00); 3027 emit_int8((unsigned char)(0xC0 | encode)); 3028 } 3029 3030 void Assembler::pshufb(XMMRegister dst, Address src) { 3031 assert(VM_Version::supports_ssse3(), ""); 3032 if (VM_Version::supports_evex()) { 3033 tuple_type = EVEX_FVM; 3034 } 3035 InstructionMark im(this); 3036 simd_prefix(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38, 3037 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3038 emit_int8(0x00); 3039 emit_operand(dst, src); 3040 } 3041 3042 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { 3043 assert(isByte(mode), "invalid value"); 3044 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3045 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66); 3046 emit_int8(mode & 0xFF); 3047 3048 } 3049 3050 void Assembler::pshufd(XMMRegister dst, Address src, int mode) { 3051 assert(isByte(mode), "invalid value"); 3052 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3053 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3054 if (VM_Version::supports_evex()) { 3055 tuple_type = EVEX_FV; 
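// Full-vector tuple: together with the 32-bit input size set next, the EVEX_FV row of
// tuple_table scales a compressed disp8 by the full vector width (16/32/64 bytes).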
3056 input_size_in_bits = EVEX_32bit; 3057 } 3058 InstructionMark im(this); 3059 simd_prefix(dst, src, VEX_SIMD_66, false); 3060 emit_int8(0x70); 3061 emit_operand(dst, src); 3062 emit_int8(mode & 0xFF); 3063 } 3064 3065 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3066 assert(isByte(mode), "invalid value"); 3067 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3068 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2, false, 3069 (VM_Version::supports_avx512bw() == false)); 3070 emit_int8(mode & 0xFF); 3071 } 3072 3073 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { 3074 assert(isByte(mode), "invalid value"); 3075 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3076 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3077 if (VM_Version::supports_evex()) { 3078 tuple_type = EVEX_FVM; 3079 } 3080 InstructionMark im(this); 3081 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, false, VEX_OPCODE_0F, 3082 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3083 emit_int8(0x70); 3084 emit_operand(dst, src); 3085 emit_int8(mode & 0xFF); 3086 } 3087 3088 void Assembler::psrldq(XMMRegister dst, int shift) { 3089 // Shift 128 bit value in xmm register by number of bytes. 3090 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3091 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, 3092 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3093 emit_int8(0x73); 3094 emit_int8((unsigned char)(0xC0 | encode)); 3095 emit_int8(shift); 3096 } 3097 3098 void Assembler::ptest(XMMRegister dst, Address src) { 3099 assert(VM_Version::supports_sse4_1(), ""); 3100 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3101 InstructionMark im(this); 3102 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false, 3103 VEX_OPCODE_0F_38, false, AVX_128bit, true); 3104 emit_int8(0x17); 3105 emit_operand(dst, src); 3106 } 3107 3108 void Assembler::ptest(XMMRegister dst, XMMRegister src) { 3109 assert(VM_Version::supports_sse4_1(), ""); 3110 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 3111 VEX_OPCODE_0F_38, false, AVX_128bit, true); 3112 emit_int8(0x17); 3113 emit_int8((unsigned char)(0xC0 | encode)); 3114 } 3115 3116 void Assembler::vptest(XMMRegister dst, Address src) { 3117 assert(VM_Version::supports_avx(), ""); 3118 InstructionMark im(this); 3119 int vector_len = AVX_256bit; 3120 assert(dst != xnoreg, "sanity"); 3121 int dst_enc = dst->encoding(); 3122 // swap src<->dst for encoding 3123 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len, true, false); 3124 emit_int8(0x17); 3125 emit_operand(dst, src); 3126 } 3127 3128 void Assembler::vptest(XMMRegister dst, XMMRegister src) { 3129 assert(VM_Version::supports_avx(), ""); 3130 int vector_len = AVX_256bit; 3131 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 3132 vector_len, VEX_OPCODE_0F_38, true, false); 3133 emit_int8(0x17); 3134 emit_int8((unsigned char)(0xC0 | encode)); 3135 } 3136 3137 void Assembler::punpcklbw(XMMRegister dst, Address src) { 3138 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3139 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3140 if (VM_Version::supports_evex()) { 3141 tuple_type = EVEX_FVM; 3142 } 3143 emit_simd_arith(0x60, dst, src, VEX_SIMD_66); 3144 } 3145 3146 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3147 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3148 emit_simd_arith(0x60, dst, src, 
VEX_SIMD_66); 3149 } 3150 3151 void Assembler::punpckldq(XMMRegister dst, Address src) { 3152 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3153 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3154 if (VM_Version::supports_evex()) { 3155 tuple_type = EVEX_FV; 3156 input_size_in_bits = EVEX_32bit; 3157 } 3158 emit_simd_arith(0x62, dst, src, VEX_SIMD_66); 3159 } 3160 3161 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { 3162 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3163 emit_simd_arith(0x62, dst, src, VEX_SIMD_66); 3164 } 3165 3166 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { 3167 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3168 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66); 3169 } 3170 3171 void Assembler::push(int32_t imm32) { 3172 // in 64bits we push 64bits onto the stack but only 3173 // take a 32bit immediate 3174 emit_int8(0x68); 3175 emit_int32(imm32); 3176 } 3177 3178 void Assembler::push(Register src) { 3179 int encode = prefix_and_encode(src->encoding()); 3180 3181 emit_int8(0x50 | encode); 3182 } 3183 3184 void Assembler::pushf() { 3185 emit_int8((unsigned char)0x9C); 3186 } 3187 3188 #ifndef _LP64 // no 32bit push/pop on amd64 3189 void Assembler::pushl(Address src) { 3190 // Note this will push 64bit on 64bit 3191 InstructionMark im(this); 3192 prefix(src); 3193 emit_int8((unsigned char)0xFF); 3194 emit_operand(rsi, src); 3195 } 3196 #endif 3197 3198 void Assembler::rcll(Register dst, int imm8) { 3199 assert(isShiftCount(imm8), "illegal shift count"); 3200 int encode = prefix_and_encode(dst->encoding()); 3201 if (imm8 == 1) { 3202 emit_int8((unsigned char)0xD1); 3203 emit_int8((unsigned char)(0xD0 | encode)); 3204 } else { 3205 emit_int8((unsigned char)0xC1); 3206 emit_int8((unsigned char)0xD0 | encode); 3207 emit_int8(imm8); 3208 } 3209 } 3210 3211 void Assembler::rdtsc() { 3212 emit_int8((unsigned char)0x0F); 3213 emit_int8((unsigned char)0x31); 3214 } 3215 3216 // copies data from [esi] to [edi] using rcx pointer sized words 3217 // generic 3218 void Assembler::rep_mov() { 3219 emit_int8((unsigned char)0xF3); 3220 // MOVSQ 3221 LP64_ONLY(prefix(REX_W)); 3222 emit_int8((unsigned char)0xA5); 3223 } 3224 3225 // sets rcx bytes with rax, value at [edi] 3226 void Assembler::rep_stosb() { 3227 emit_int8((unsigned char)0xF3); // REP 3228 LP64_ONLY(prefix(REX_W)); 3229 emit_int8((unsigned char)0xAA); // STOSB 3230 } 3231 3232 // sets rcx pointer sized words with rax, value at [edi] 3233 // generic 3234 void Assembler::rep_stos() { 3235 emit_int8((unsigned char)0xF3); // REP 3236 LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD 3237 emit_int8((unsigned char)0xAB); 3238 } 3239 3240 // scans rcx pointer sized words at [edi] for occurance of rax, 3241 // generic 3242 void Assembler::repne_scan() { // repne_scan 3243 emit_int8((unsigned char)0xF2); 3244 // SCASQ 3245 LP64_ONLY(prefix(REX_W)); 3246 emit_int8((unsigned char)0xAF); 3247 } 3248 3249 #ifdef _LP64 3250 // scans rcx 4 byte words at [edi] for occurance of rax, 3251 // generic 3252 void Assembler::repne_scanl() { // repne_scan 3253 emit_int8((unsigned char)0xF2); 3254 // SCASL 3255 emit_int8((unsigned char)0xAF); 3256 } 3257 #endif 3258 3259 void Assembler::ret(int imm16) { 3260 if (imm16 == 0) { 3261 emit_int8((unsigned char)0xC3); 3262 } else { 3263 emit_int8((unsigned char)0xC2); 3264 emit_int16(imm16); 3265 } 3266 } 3267 3268 void Assembler::sahf() { 3269 #ifdef _LP64 3270 // Not supported in 64bit mode 3271 ShouldNotReachHere(); 3272 #endif 3273 
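// SAHF (opcode 0x9E) copies AH into the low byte of the flags register; the guard above
// keeps it 32-bit only.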
emit_int8((unsigned char)0x9E); 3274 } 3275 3276 void Assembler::sarl(Register dst, int imm8) { 3277 int encode = prefix_and_encode(dst->encoding()); 3278 assert(isShiftCount(imm8), "illegal shift count"); 3279 if (imm8 == 1) { 3280 emit_int8((unsigned char)0xD1); 3281 emit_int8((unsigned char)(0xF8 | encode)); 3282 } else { 3283 emit_int8((unsigned char)0xC1); 3284 emit_int8((unsigned char)(0xF8 | encode)); 3285 emit_int8(imm8); 3286 } 3287 } 3288 3289 void Assembler::sarl(Register dst) { 3290 int encode = prefix_and_encode(dst->encoding()); 3291 emit_int8((unsigned char)0xD3); 3292 emit_int8((unsigned char)(0xF8 | encode)); 3293 } 3294 3295 void Assembler::sbbl(Address dst, int32_t imm32) { 3296 InstructionMark im(this); 3297 prefix(dst); 3298 emit_arith_operand(0x81, rbx, dst, imm32); 3299 } 3300 3301 void Assembler::sbbl(Register dst, int32_t imm32) { 3302 prefix(dst); 3303 emit_arith(0x81, 0xD8, dst, imm32); 3304 } 3305 3306 3307 void Assembler::sbbl(Register dst, Address src) { 3308 InstructionMark im(this); 3309 prefix(src, dst); 3310 emit_int8(0x1B); 3311 emit_operand(dst, src); 3312 } 3313 3314 void Assembler::sbbl(Register dst, Register src) { 3315 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3316 emit_arith(0x1B, 0xC0, dst, src); 3317 } 3318 3319 void Assembler::setb(Condition cc, Register dst) { 3320 assert(0 <= cc && cc < 16, "illegal cc"); 3321 int encode = prefix_and_encode(dst->encoding(), true); 3322 emit_int8(0x0F); 3323 emit_int8((unsigned char)0x90 | cc); 3324 emit_int8((unsigned char)(0xC0 | encode)); 3325 } 3326 3327 void Assembler::shll(Register dst, int imm8) { 3328 assert(isShiftCount(imm8), "illegal shift count"); 3329 int encode = prefix_and_encode(dst->encoding()); 3330 if (imm8 == 1 ) { 3331 emit_int8((unsigned char)0xD1); 3332 emit_int8((unsigned char)(0xE0 | encode)); 3333 } else { 3334 emit_int8((unsigned char)0xC1); 3335 emit_int8((unsigned char)(0xE0 | encode)); 3336 emit_int8(imm8); 3337 } 3338 } 3339 3340 void Assembler::shll(Register dst) { 3341 int encode = prefix_and_encode(dst->encoding()); 3342 emit_int8((unsigned char)0xD3); 3343 emit_int8((unsigned char)(0xE0 | encode)); 3344 } 3345 3346 void Assembler::shrl(Register dst, int imm8) { 3347 assert(isShiftCount(imm8), "illegal shift count"); 3348 int encode = prefix_and_encode(dst->encoding()); 3349 emit_int8((unsigned char)0xC1); 3350 emit_int8((unsigned char)(0xE8 | encode)); 3351 emit_int8(imm8); 3352 } 3353 3354 void Assembler::shrl(Register dst) { 3355 int encode = prefix_and_encode(dst->encoding()); 3356 emit_int8((unsigned char)0xD3); 3357 emit_int8((unsigned char)(0xE8 | encode)); 3358 } 3359 3360 // copies a single word from [esi] to [edi] 3361 void Assembler::smovl() { 3362 emit_int8((unsigned char)0xA5); 3363 } 3364 3365 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { 3366 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3367 if (VM_Version::supports_evex()) { 3368 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3369 } else { 3370 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3371 } 3372 } 3373 3374 void Assembler::sqrtsd(XMMRegister dst, Address src) { 3375 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3376 if (VM_Version::supports_evex()) { 3377 tuple_type = EVEX_T1S; 3378 input_size_in_bits = EVEX_64bit; 3379 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3380 } else { 3381 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3382 } 3383 } 3384 3385 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 3386 NOT_LP64(assert(VM_Version::supports_sse(), 
"")); 3387 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3388 } 3389 3390 void Assembler::std() { 3391 emit_int8((unsigned char)0xFD); 3392 } 3393 3394 void Assembler::sqrtss(XMMRegister dst, Address src) { 3395 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3396 if (VM_Version::supports_evex()) { 3397 tuple_type = EVEX_T1S; 3398 input_size_in_bits = EVEX_32bit; 3399 } 3400 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3401 } 3402 3403 void Assembler::stmxcsr( Address dst) { 3404 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3405 InstructionMark im(this); 3406 prefix(dst); 3407 emit_int8(0x0F); 3408 emit_int8((unsigned char)0xAE); 3409 emit_operand(as_Register(3), dst); 3410 } 3411 3412 void Assembler::subl(Address dst, int32_t imm32) { 3413 InstructionMark im(this); 3414 prefix(dst); 3415 emit_arith_operand(0x81, rbp, dst, imm32); 3416 } 3417 3418 void Assembler::subl(Address dst, Register src) { 3419 InstructionMark im(this); 3420 prefix(dst, src); 3421 emit_int8(0x29); 3422 emit_operand(src, dst); 3423 } 3424 3425 void Assembler::subl(Register dst, int32_t imm32) { 3426 prefix(dst); 3427 emit_arith(0x81, 0xE8, dst, imm32); 3428 } 3429 3430 // Force generation of a 4 byte immediate value even if it fits into 8bit 3431 void Assembler::subl_imm32(Register dst, int32_t imm32) { 3432 prefix(dst); 3433 emit_arith_imm32(0x81, 0xE8, dst, imm32); 3434 } 3435 3436 void Assembler::subl(Register dst, Address src) { 3437 InstructionMark im(this); 3438 prefix(src, dst); 3439 emit_int8(0x2B); 3440 emit_operand(dst, src); 3441 } 3442 3443 void Assembler::subl(Register dst, Register src) { 3444 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3445 emit_arith(0x2B, 0xC0, dst, src); 3446 } 3447 3448 void Assembler::subsd(XMMRegister dst, XMMRegister src) { 3449 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3450 if (VM_Version::supports_evex()) { 3451 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3452 } else { 3453 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2); 3454 } 3455 } 3456 3457 void Assembler::subsd(XMMRegister dst, Address src) { 3458 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3459 if (VM_Version::supports_evex()) { 3460 tuple_type = EVEX_T1S; 3461 input_size_in_bits = EVEX_64bit; 3462 } 3463 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3464 } 3465 3466 void Assembler::subss(XMMRegister dst, XMMRegister src) { 3467 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3468 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3469 } 3470 3471 void Assembler::subss(XMMRegister dst, Address src) { 3472 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3473 if (VM_Version::supports_evex()) { 3474 tuple_type = EVEX_T1S; 3475 input_size_in_bits = EVEX_32bit; 3476 } 3477 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3478 } 3479 3480 void Assembler::testb(Register dst, int imm8) { 3481 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 3482 (void) prefix_and_encode(dst->encoding(), true); 3483 emit_arith_b(0xF6, 0xC0, dst, imm8); 3484 } 3485 3486 void Assembler::testl(Register dst, int32_t imm32) { 3487 // not using emit_arith because test 3488 // doesn't support sign-extension of 3489 // 8bit operands 3490 int encode = dst->encoding(); 3491 if (encode == 0) { 3492 emit_int8((unsigned char)0xA9); 3493 } else { 3494 encode = prefix_and_encode(encode); 3495 emit_int8((unsigned char)0xF7); 3496 emit_int8((unsigned char)(0xC0 | encode)); 3497 } 3498 emit_int32(imm32); 3499 } 3500 3501 void Assembler::testl(Register dst, Register src) { 3502 (void) 
prefix_and_encode(dst->encoding(), src->encoding()); 3503 emit_arith(0x85, 0xC0, dst, src); 3504 } 3505 3506 void Assembler::testl(Register dst, Address src) { 3507 InstructionMark im(this); 3508 prefix(src, dst); 3509 emit_int8((unsigned char)0x85); 3510 emit_operand(dst, src); 3511 } 3512 3513 void Assembler::tzcntl(Register dst, Register src) { 3514 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3515 emit_int8((unsigned char)0xF3); 3516 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3517 emit_int8(0x0F); 3518 emit_int8((unsigned char)0xBC); 3519 emit_int8((unsigned char)0xC0 | encode); 3520 } 3521 3522 void Assembler::tzcntq(Register dst, Register src) { 3523 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3524 emit_int8((unsigned char)0xF3); 3525 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 3526 emit_int8(0x0F); 3527 emit_int8((unsigned char)0xBC); 3528 emit_int8((unsigned char)(0xC0 | encode)); 3529 } 3530 3531 void Assembler::ucomisd(XMMRegister dst, Address src) { 3532 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3533 if (VM_Version::supports_evex()) { 3534 tuple_type = EVEX_T1S; 3535 input_size_in_bits = EVEX_64bit; 3536 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3537 } else { 3538 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3539 } 3540 } 3541 3542 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 3543 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3544 if (VM_Version::supports_evex()) { 3545 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3546 } else { 3547 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3548 } 3549 } 3550 3551 void Assembler::ucomiss(XMMRegister dst, Address src) { 3552 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3553 if (VM_Version::supports_evex()) { 3554 tuple_type = EVEX_T1S; 3555 input_size_in_bits = EVEX_32bit; 3556 } 3557 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3558 } 3559 3560 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { 3561 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3562 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3563 } 3564 3565 void Assembler::xabort(int8_t imm8) { 3566 emit_int8((unsigned char)0xC6); 3567 emit_int8((unsigned char)0xF8); 3568 emit_int8((unsigned char)(imm8 & 0xFF)); 3569 } 3570 3571 void Assembler::xaddl(Address dst, Register src) { 3572 InstructionMark im(this); 3573 prefix(dst, src); 3574 emit_int8(0x0F); 3575 emit_int8((unsigned char)0xC1); 3576 emit_operand(src, dst); 3577 } 3578 3579 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { 3580 InstructionMark im(this); 3581 relocate(rtype); 3582 if (abort.is_bound()) { 3583 address entry = target(abort); 3584 assert(entry != NULL, "abort entry NULL"); 3585 intptr_t offset = entry - pc(); 3586 emit_int8((unsigned char)0xC7); 3587 emit_int8((unsigned char)0xF8); 3588 emit_int32(offset - 6); // 2 opcode + 4 address 3589 } else { 3590 abort.add_patch_at(code(), locator()); 3591 emit_int8((unsigned char)0xC7); 3592 emit_int8((unsigned char)0xF8); 3593 emit_int32(0); 3594 } 3595 } 3596 3597 void Assembler::xchgl(Register dst, Address src) { // xchg 3598 InstructionMark im(this); 3599 prefix(src, dst); 3600 emit_int8((unsigned char)0x87); 3601 emit_operand(dst, src); 3602 } 3603 3604 void Assembler::xchgl(Register dst, Register src) { 3605 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3606 emit_int8((unsigned char)0x87); 3607 
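// Register-register form: the ModRM byte (0xC0 | encode) follows; only the memory form
// of xchg carries an implicit lock.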
emit_int8((unsigned char)(0xC0 | encode)); 3608 } 3609 3610 void Assembler::xend() { 3611 emit_int8((unsigned char)0x0F); 3612 emit_int8((unsigned char)0x01); 3613 emit_int8((unsigned char)0xD5); 3614 } 3615 3616 void Assembler::xgetbv() { 3617 emit_int8(0x0F); 3618 emit_int8(0x01); 3619 emit_int8((unsigned char)0xD0); 3620 } 3621 3622 void Assembler::xorl(Register dst, int32_t imm32) { 3623 prefix(dst); 3624 emit_arith(0x81, 0xF0, dst, imm32); 3625 } 3626 3627 void Assembler::xorl(Register dst, Address src) { 3628 InstructionMark im(this); 3629 prefix(src, dst); 3630 emit_int8(0x33); 3631 emit_operand(dst, src); 3632 } 3633 3634 void Assembler::xorl(Register dst, Register src) { 3635 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3636 emit_arith(0x33, 0xC0, dst, src); 3637 } 3638 3639 3640 // AVX 3-operands scalar float-point arithmetic instructions 3641 3642 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) { 3643 assert(VM_Version::supports_avx(), ""); 3644 if (VM_Version::supports_evex()) { 3645 tuple_type = EVEX_T1S; 3646 input_size_in_bits = EVEX_64bit; 3647 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3648 } else { 3649 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3650 } 3651 } 3652 3653 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3654 assert(VM_Version::supports_avx(), ""); 3655 if (VM_Version::supports_evex()) { 3656 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3657 } else { 3658 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3659 } 3660 } 3661 3662 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { 3663 assert(VM_Version::supports_avx(), ""); 3664 if (VM_Version::supports_evex()) { 3665 tuple_type = EVEX_T1S; 3666 input_size_in_bits = EVEX_32bit; 3667 } 3668 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3669 } 3670 3671 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3672 assert(VM_Version::supports_avx(), ""); 3673 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3674 } 3675 3676 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { 3677 assert(VM_Version::supports_avx(), ""); 3678 if (VM_Version::supports_evex()) { 3679 tuple_type = EVEX_T1S; 3680 input_size_in_bits = EVEX_64bit; 3681 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3682 } else { 3683 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3684 } 3685 } 3686 3687 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3688 assert(VM_Version::supports_avx(), ""); 3689 if (VM_Version::supports_evex()) { 3690 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3691 } else { 3692 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3693 } 3694 } 3695 3696 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { 3697 assert(VM_Version::supports_avx(), ""); 3698 if (VM_Version::supports_evex()) { 3699 tuple_type = EVEX_T1S; 3700 input_size_in_bits = EVEX_32bit; 3701 } 3702 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3703 } 3704 3705 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3706 assert(VM_Version::supports_avx(), ""); 3707 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3708 } 3709 3710 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { 3711 assert(VM_Version::supports_avx(), ""); 3712 if (VM_Version::supports_evex()) { 3713 
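// Same EVEX T1S / 64-bit element pattern as the scalar mulsd above: a displacement in
// 'src' that is a multiple of 8 can be compressed to a single disp8.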
tuple_type = EVEX_T1S; 3714 input_size_in_bits = EVEX_64bit; 3715 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3716 } else { 3717 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3718 } 3719 } 3720 3721 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3722 assert(VM_Version::supports_avx(), ""); 3723 if (VM_Version::supports_evex()) { 3724 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3725 } else { 3726 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3727 } 3728 } 3729 3730 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { 3731 assert(VM_Version::supports_avx(), ""); 3732 if (VM_Version::supports_evex()) { 3733 tuple_type = EVEX_T1S; 3734 input_size_in_bits = EVEX_32bit; 3735 } 3736 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3737 } 3738 3739 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3740 assert(VM_Version::supports_avx(), ""); 3741 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3742 } 3743 3744 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) { 3745 assert(VM_Version::supports_avx(), ""); 3746 if (VM_Version::supports_evex()) { 3747 tuple_type = EVEX_T1S; 3748 input_size_in_bits = EVEX_64bit; 3749 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3750 } else { 3751 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3752 } 3753 } 3754 3755 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3756 assert(VM_Version::supports_avx(), ""); 3757 if (VM_Version::supports_evex()) { 3758 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3759 } else { 3760 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3761 } 3762 } 3763 3764 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { 3765 assert(VM_Version::supports_avx(), ""); 3766 if (VM_Version::supports_evex()) { 3767 tuple_type = EVEX_T1S; 3768 input_size_in_bits = EVEX_32bit; 3769 } 3770 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3771 } 3772 3773 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3774 assert(VM_Version::supports_avx(), ""); 3775 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3776 } 3777 3778 //====================VECTOR ARITHMETIC===================================== 3779 3780 // Float-point vector arithmetic 3781 3782 void Assembler::addpd(XMMRegister dst, XMMRegister src) { 3783 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3784 if (VM_Version::supports_evex()) { 3785 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_66); 3786 } else { 3787 emit_simd_arith(0x58, dst, src, VEX_SIMD_66); 3788 } 3789 } 3790 3791 void Assembler::addps(XMMRegister dst, XMMRegister src) { 3792 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3793 emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE); 3794 } 3795 3796 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3797 assert(VM_Version::supports_avx(), ""); 3798 if (VM_Version::supports_evex()) { 3799 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3800 } else { 3801 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3802 } 3803 } 3804 3805 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3806 assert(VM_Version::supports_avx(), ""); 3807 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len); 3808 } 3809 3810 void 
Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3811 assert(VM_Version::supports_avx(), ""); 3812 if (VM_Version::supports_evex()) { 3813 tuple_type = EVEX_FV; 3814 input_size_in_bits = EVEX_64bit; 3815 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3816 } else { 3817 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3818 } 3819 } 3820 3821 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3822 assert(VM_Version::supports_avx(), ""); 3823 if (VM_Version::supports_evex()) { 3824 tuple_type = EVEX_FV; 3825 input_size_in_bits = EVEX_32bit; 3826 } 3827 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len); 3828 } 3829 3830 void Assembler::subpd(XMMRegister dst, XMMRegister src) { 3831 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3832 if (VM_Version::supports_evex()) { 3833 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_66); 3834 } else { 3835 emit_simd_arith(0x5C, dst, src, VEX_SIMD_66); 3836 } 3837 } 3838 3839 void Assembler::subps(XMMRegister dst, XMMRegister src) { 3840 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3841 emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE); 3842 } 3843 3844 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3845 assert(VM_Version::supports_avx(), ""); 3846 if (VM_Version::supports_evex()) { 3847 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3848 } else { 3849 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3850 } 3851 } 3852 3853 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3854 assert(VM_Version::supports_avx(), ""); 3855 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3856 } 3857 3858 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3859 assert(VM_Version::supports_avx(), ""); 3860 if (VM_Version::supports_evex()) { 3861 tuple_type = EVEX_FV; 3862 input_size_in_bits = EVEX_64bit; 3863 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3864 } else { 3865 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3866 } 3867 } 3868 3869 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3870 assert(VM_Version::supports_avx(), ""); 3871 if (VM_Version::supports_evex()) { 3872 tuple_type = EVEX_FV; 3873 input_size_in_bits = EVEX_32bit; 3874 } 3875 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3876 } 3877 3878 void Assembler::mulpd(XMMRegister dst, XMMRegister src) { 3879 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3880 if (VM_Version::supports_evex()) { 3881 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66); 3882 } else { 3883 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); 3884 } 3885 } 3886 3887 void Assembler::mulps(XMMRegister dst, XMMRegister src) { 3888 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3889 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE); 3890 } 3891 3892 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3893 assert(VM_Version::supports_avx(), ""); 3894 if (VM_Version::supports_evex()) { 3895 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3896 } else { 3897 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3898 } 3899 } 3900 3901 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3902 assert(VM_Version::supports_avx(), ""); 3903 emit_vex_arith(0x59, dst, nds, src, 
VEX_SIMD_NONE, vector_len); 3904 } 3905 3906 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3907 assert(VM_Version::supports_avx(), ""); 3908 if (VM_Version::supports_evex()) { 3909 tuple_type = EVEX_FV; 3910 input_size_in_bits = EVEX_64bit; 3911 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3912 } else { 3913 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3914 } 3915 } 3916 3917 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3918 assert(VM_Version::supports_avx(), ""); 3919 if (VM_Version::supports_evex()) { 3920 tuple_type = EVEX_FV; 3921 input_size_in_bits = EVEX_32bit; 3922 } 3923 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len); 3924 } 3925 3926 void Assembler::divpd(XMMRegister dst, XMMRegister src) { 3927 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3928 if (VM_Version::supports_evex()) { 3929 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_66); 3930 } else { 3931 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66); 3932 } 3933 } 3934 3935 void Assembler::divps(XMMRegister dst, XMMRegister src) { 3936 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3937 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE); 3938 } 3939 3940 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3941 assert(VM_Version::supports_avx(), ""); 3942 if (VM_Version::supports_evex()) { 3943 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3944 } else { 3945 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3946 } 3947 } 3948 3949 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3950 assert(VM_Version::supports_avx(), ""); 3951 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 3952 } 3953 3954 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3955 assert(VM_Version::supports_avx(), ""); 3956 if (VM_Version::supports_evex()) { 3957 tuple_type = EVEX_FV; 3958 input_size_in_bits = EVEX_64bit; 3959 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3960 } else { 3961 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3962 } 3963 } 3964 3965 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3966 assert(VM_Version::supports_avx(), ""); 3967 if (VM_Version::supports_evex()) { 3968 tuple_type = EVEX_FV; 3969 input_size_in_bits = EVEX_32bit; 3970 } 3971 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 3972 } 3973 3974 void Assembler::andpd(XMMRegister dst, XMMRegister src) { 3975 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3976 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 3977 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 3978 } else { 3979 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 3980 } 3981 } 3982 3983 void Assembler::andps(XMMRegister dst, XMMRegister src) { 3984 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3985 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, false, 3986 (VM_Version::supports_avx512dq() == false)); 3987 } 3988 3989 void Assembler::andps(XMMRegister dst, Address src) { 3990 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3991 if (VM_Version::supports_evex()) { 3992 tuple_type = EVEX_FV; 3993 input_size_in_bits = EVEX_32bit; 3994 } 3995 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, 3996 false, (VM_Version::supports_avx512dq() == false)); 3997 } 3998 3999 void Assembler::andpd(XMMRegister dst, 
Address src) { 4000 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4001 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4002 tuple_type = EVEX_FV; 4003 input_size_in_bits = EVEX_64bit; 4004 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 4005 } else { 4006 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 4007 } 4008 } 4009 4010 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4011 assert(VM_Version::supports_avx(), ""); 4012 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4013 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4014 } else { 4015 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4016 } 4017 } 4018 4019 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4020 assert(VM_Version::supports_avx(), ""); 4021 bool legacy_mode = (VM_Version::supports_avx512dq() == false); 4022 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, legacy_mode); 4023 } 4024 4025 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4026 assert(VM_Version::supports_avx(), ""); 4027 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4028 tuple_type = EVEX_FV; 4029 input_size_in_bits = EVEX_64bit; 4030 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4031 } else { 4032 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4033 } 4034 } 4035 4036 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4037 assert(VM_Version::supports_avx(), ""); 4038 if (VM_Version::supports_evex()) { 4039 tuple_type = EVEX_FV; 4040 input_size_in_bits = EVEX_32bit; 4041 } 4042 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, 4043 (VM_Version::supports_avx512dq() == false)); 4044 } 4045 4046 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { 4047 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4048 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4049 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4050 } else { 4051 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4052 } 4053 } 4054 4055 void Assembler::xorps(XMMRegister dst, XMMRegister src) { 4056 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4057 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, 4058 false, (VM_Version::supports_avx512dq() == false)); 4059 } 4060 4061 void Assembler::xorpd(XMMRegister dst, Address src) { 4062 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4063 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4064 tuple_type = EVEX_FV; 4065 input_size_in_bits = EVEX_64bit; 4066 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4067 } else { 4068 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4069 } 4070 } 4071 4072 void Assembler::xorps(XMMRegister dst, Address src) { 4073 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4074 if (VM_Version::supports_evex()) { 4075 tuple_type = EVEX_FV; 4076 input_size_in_bits = EVEX_32bit; 4077 } 4078 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, false, 4079 (VM_Version::supports_avx512dq() == false)); 4080 } 4081 4082 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4083 assert(VM_Version::supports_avx(), ""); 4084 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4085 emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len); 4086 } else { 4087 emit_vex_arith(0x57, 
dst, nds, src, VEX_SIMD_66, vector_len, true); 4088 } 4089 } 4090 4091 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4092 assert(VM_Version::supports_avx(), ""); 4093 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector_len, 4094 (VM_Version::supports_avx512dq() == false)); 4095 } 4096 4097 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4098 assert(VM_Version::supports_avx(), ""); 4099 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4100 tuple_type = EVEX_FV; 4101 input_size_in_bits = EVEX_64bit; 4102 emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len); 4103 } else { 4104 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector_len, true); 4105 } 4106 } 4107 4108 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4109 assert(VM_Version::supports_avx(), ""); 4110 if (VM_Version::supports_evex()) { 4111 tuple_type = EVEX_FV; 4112 input_size_in_bits = EVEX_32bit; 4113 } 4114 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector_len, 4115 (VM_Version::supports_avx512dq() == false)); 4116 } 4117 4118 // Integer vector arithmetic 4119 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4120 assert(VM_Version::supports_avx() && (vector_len == 0) || 4121 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 4122 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, 4123 VEX_OPCODE_0F_38, true, false); 4124 emit_int8(0x01); 4125 emit_int8((unsigned char)(0xC0 | encode)); 4126 } 4127 4128 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4129 assert(VM_Version::supports_avx() && (vector_len == 0) || 4130 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 4131 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, 4132 VEX_OPCODE_0F_38, true, false); 4133 emit_int8(0x02); 4134 emit_int8((unsigned char)(0xC0 | encode)); 4135 } 4136 4137 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 4138 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4139 emit_simd_arith(0xFC, dst, src, VEX_SIMD_66); 4140 } 4141 4142 void Assembler::paddw(XMMRegister dst, XMMRegister src) { 4143 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4144 emit_simd_arith(0xFD, dst, src, VEX_SIMD_66); 4145 } 4146 4147 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 4148 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4149 emit_simd_arith(0xFE, dst, src, VEX_SIMD_66); 4150 } 4151 4152 void Assembler::paddq(XMMRegister dst, XMMRegister src) { 4153 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4154 if (VM_Version::supports_evex()) { 4155 emit_simd_arith_q(0xD4, dst, src, VEX_SIMD_66); 4156 } else { 4157 emit_simd_arith(0xD4, dst, src, VEX_SIMD_66); 4158 } 4159 } 4160 4161 void Assembler::phaddw(XMMRegister dst, XMMRegister src) { 4162 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 4163 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 4164 VEX_OPCODE_0F_38, false, AVX_128bit, true); 4165 emit_int8(0x01); 4166 emit_int8((unsigned char)(0xC0 | encode)); 4167 } 4168 4169 void Assembler::phaddd(XMMRegister dst, XMMRegister src) { 4170 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 4171 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 4172 VEX_OPCODE_0F_38, false, AVX_128bit, true); 4173 emit_int8(0x02); 4174 emit_int8((unsigned char)(0xC0 | 
encode)); 4175 } 4176 4177 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4178 assert(UseAVX > 0, "requires some form of AVX"); 4179 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len, 4180 (VM_Version::supports_avx512bw() == false)); 4181 } 4182 4183 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4184 assert(UseAVX > 0, "requires some form of AVX"); 4185 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len, 4186 (VM_Version::supports_avx512bw() == false)); 4187 } 4188 4189 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4190 assert(UseAVX > 0, "requires some form of AVX"); 4191 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4192 } 4193 4194 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4195 assert(UseAVX > 0, "requires some form of AVX"); 4196 if (VM_Version::supports_evex()) { 4197 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4198 } else { 4199 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4200 } 4201 } 4202 4203 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4204 assert(UseAVX > 0, "requires some form of AVX"); 4205 if (VM_Version::supports_evex()) { 4206 tuple_type = EVEX_FVM; 4207 } 4208 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len); 4209 } 4210 4211 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4212 assert(UseAVX > 0, "requires some form of AVX"); 4213 if (VM_Version::supports_evex()) { 4214 tuple_type = EVEX_FVM; 4215 } 4216 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len); 4217 } 4218 4219 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4220 assert(UseAVX > 0, "requires some form of AVX"); 4221 if (VM_Version::supports_evex()) { 4222 tuple_type = EVEX_FV; 4223 input_size_in_bits = EVEX_32bit; 4224 } 4225 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4226 } 4227 4228 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4229 assert(UseAVX > 0, "requires some form of AVX"); 4230 if (VM_Version::supports_evex()) { 4231 tuple_type = EVEX_FV; 4232 input_size_in_bits = EVEX_64bit; 4233 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4234 } else { 4235 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4236 } 4237 } 4238 4239 void Assembler::psubb(XMMRegister dst, XMMRegister src) { 4240 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4241 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66); 4242 } 4243 4244 void Assembler::psubw(XMMRegister dst, XMMRegister src) { 4245 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4246 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66); 4247 } 4248 4249 void Assembler::psubd(XMMRegister dst, XMMRegister src) { 4250 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4251 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66); 4252 } 4253 4254 void Assembler::psubq(XMMRegister dst, XMMRegister src) { 4255 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4256 if (VM_Version::supports_evex()) { 4257 emit_simd_arith_q(0xFB, dst, src, VEX_SIMD_66); 4258 } else { 4259 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66); 4260 } 4261 } 4262 4263 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4264 assert(UseAVX > 0, "requires some form of AVX"); 4265 emit_vex_arith(0xF8, dst, 
nds, src, VEX_SIMD_66, vector_len, 4266 (VM_Version::supports_avx512bw() == false)); 4267 } 4268 4269 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4270 assert(UseAVX > 0, "requires some form of AVX"); 4271 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4272 (VM_Version::supports_avx512bw() == false)); 4273 } 4274 4275 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4276 assert(UseAVX > 0, "requires some form of AVX"); 4277 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4278 } 4279 4280 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4281 assert(UseAVX > 0, "requires some form of AVX"); 4282 if (VM_Version::supports_evex()) { 4283 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4284 } else { 4285 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4286 } 4287 } 4288 4289 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4290 assert(UseAVX > 0, "requires some form of AVX"); 4291 if (VM_Version::supports_evex()) { 4292 tuple_type = EVEX_FVM; 4293 } 4294 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector_len, 4295 (VM_Version::supports_avx512bw() == false)); 4296 } 4297 4298 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4299 assert(UseAVX > 0, "requires some form of AVX"); 4300 if (VM_Version::supports_evex()) { 4301 tuple_type = EVEX_FVM; 4302 } 4303 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4304 (VM_Version::supports_avx512bw() == false)); 4305 } 4306 4307 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4308 assert(UseAVX > 0, "requires some form of AVX"); 4309 if (VM_Version::supports_evex()) { 4310 tuple_type = EVEX_FV; 4311 input_size_in_bits = EVEX_32bit; 4312 } 4313 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4314 } 4315 4316 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4317 assert(UseAVX > 0, "requires some form of AVX"); 4318 if (VM_Version::supports_evex()) { 4319 tuple_type = EVEX_FV; 4320 input_size_in_bits = EVEX_64bit; 4321 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4322 } else { 4323 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4324 } 4325 } 4326 4327 void Assembler::pmullw(XMMRegister dst, XMMRegister src) { 4328 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4329 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66, 4330 (VM_Version::supports_avx512bw() == false)); 4331 } 4332 4333 void Assembler::pmulld(XMMRegister dst, XMMRegister src) { 4334 assert(VM_Version::supports_sse4_1(), ""); 4335 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, 4336 false, VEX_OPCODE_0F_38); 4337 emit_int8(0x40); 4338 emit_int8((unsigned char)(0xC0 | encode)); 4339 } 4340 4341 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4342 assert(UseAVX > 0, "requires some form of AVX"); 4343 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len, 4344 (VM_Version::supports_avx512bw() == false)); 4345 } 4346 4347 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4348 assert(UseAVX > 0, "requires some form of AVX"); 4349 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 4350 vector_len, VEX_OPCODE_0F_38); 4351 emit_int8(0x40); 4352 emit_int8((unsigned char)(0xC0 | 
encode));
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires AVX-512");
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66,
                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66,
             VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x40);
  emit_operand(dst, src);
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  // VPMULLQ is EVEX-only; require the same AVX level as the register form above.
  assert(UseAVX > 2, "requires AVX-512");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_64bit;
  }
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
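// In the immediate-count shift forms below, the first XMM argument is not a real
// operand: it supplies the ModRM reg-field opcode extension (/6 = shift left,
// /2 = shift logical right, /4 = shift arithmetic right).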
4404 void Assembler::psllw(XMMRegister dst, int shift) { 4405 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4406 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4407 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4408 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 4409 emit_int8(0x71); 4410 emit_int8((unsigned char)(0xC0 | encode)); 4411 emit_int8(shift & 0xFF); 4412 } 4413 4414 void Assembler::pslld(XMMRegister dst, int shift) { 4415 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4416 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4417 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false); 4418 emit_int8(0x72); 4419 emit_int8((unsigned char)(0xC0 | encode)); 4420 emit_int8(shift & 0xFF); 4421 } 4422 4423 void Assembler::psllq(XMMRegister dst, int shift) { 4424 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4425 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4426 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true); 4427 emit_int8(0x73); 4428 emit_int8((unsigned char)(0xC0 | encode)); 4429 emit_int8(shift & 0xFF); 4430 } 4431 4432 void Assembler::psllw(XMMRegister dst, XMMRegister shift) { 4433 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4434 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66, false, 4435 (VM_Version::supports_avx512bw() == false)); 4436 } 4437 4438 void Assembler::pslld(XMMRegister dst, XMMRegister shift) { 4439 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4440 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66); 4441 } 4442 4443 void Assembler::psllq(XMMRegister dst, XMMRegister shift) { 4444 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4445 if (VM_Version::supports_evex()) { 4446 emit_simd_arith_q(0xF3, dst, shift, VEX_SIMD_66); 4447 } else { 4448 emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66); 4449 } 4450 } 4451 4452 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4453 assert(UseAVX > 0, "requires some form of AVX"); 4454 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4455 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector_len, 4456 (VM_Version::supports_avx512bw() == false)); 4457 emit_int8(shift & 0xFF); 4458 } 4459 4460 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4461 assert(UseAVX > 0, "requires some form of AVX"); 4462 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4463 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector_len); 4464 emit_int8(shift & 0xFF); 4465 } 4466 4467 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4468 assert(UseAVX > 0, "requires some form of AVX"); 4469 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4470 if (VM_Version::supports_evex()) { 4471 emit_vex_arith_q(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4472 } else { 4473 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4474 } 4475 emit_int8(shift & 0xFF); 4476 } 4477 4478 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4479 assert(UseAVX > 0, "requires some form of AVX"); 4480 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector_len, 4481 (VM_Version::supports_avx512bw() == false)); 4482 } 4483 4484 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4485 assert(UseAVX > 0, "requires some form of AVX"); 4486 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector_len); 4487 } 4488 4489 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, 
XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0xF3, dst, src, shift, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector_len);
  }
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F,
                                      (VM_Version::supports_avx512bw() == false));
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse with the psrldq SSE2 instruction, which shifts the whole
  // 128-bit value in an xmm register by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = 0;
  if (VM_Version::supports_evex() && VM_Version::supports_avx512bw()) {
    encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false);
  } else {
    encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true);
  }
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66, false,
                  (VM_Version::supports_avx512bw() == false));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0xD3, dst, shift, VEX_SIMD_66);
  } else {
    emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
  }
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector_len,
                 (VM_Version::supports_avx512bw() == false));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector_len);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len);
  } else {
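    // VEX path: xmm2 again supplies the /2 opcode extension.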
4575 emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len); 4576 } 4577 emit_int8(shift & 0xFF); 4578 } 4579 4580 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4581 assert(UseAVX > 0, "requires some form of AVX"); 4582 emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector_len, 4583 (VM_Version::supports_avx512bw() == false)); 4584 } 4585 4586 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4587 assert(UseAVX > 0, "requires some form of AVX"); 4588 emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector_len); 4589 } 4590 4591 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4592 assert(UseAVX > 0, "requires some form of AVX"); 4593 if (VM_Version::supports_evex()) { 4594 emit_vex_arith_q(0xD3, dst, src, shift, VEX_SIMD_66, vector_len); 4595 } else { 4596 emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector_len); 4597 } 4598 } 4599 4600 // Shift packed integers arithmetically right by specified number of bits. 4601 void Assembler::psraw(XMMRegister dst, int shift) { 4602 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4603 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4604 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4605 (VM_Version::supports_avx512bw() == false)); 4606 emit_int8(0x71); 4607 emit_int8((unsigned char)(0xC0 | encode)); 4608 emit_int8(shift & 0xFF); 4609 } 4610 4611 void Assembler::psrad(XMMRegister dst, int shift) { 4612 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4613 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 4614 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false); 4615 emit_int8(0x72); 4616 emit_int8((unsigned char)(0xC0 | encode)); 4617 emit_int8(shift & 0xFF); 4618 } 4619 4620 void Assembler::psraw(XMMRegister dst, XMMRegister shift) { 4621 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4622 emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66, 4623 (VM_Version::supports_avx512bw() == false)); 4624 } 4625 4626 void Assembler::psrad(XMMRegister dst, XMMRegister shift) { 4627 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4628 emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66); 4629 } 4630 4631 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4632 assert(UseAVX > 0, "requires some form of AVX"); 4633 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4634 emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector_len, 4635 (VM_Version::supports_avx512bw() == false)); 4636 emit_int8(shift & 0xFF); 4637 } 4638 4639 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4640 assert(UseAVX > 0, "requires some form of AVX"); 4641 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4642 emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector_len); 4643 emit_int8(shift & 0xFF); 4644 } 4645 4646 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4647 assert(UseAVX > 0, "requires some form of AVX"); 4648 emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector_len, 4649 (VM_Version::supports_avx512bw() == false)); 4650 } 4651 4652 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4653 assert(UseAVX > 0, "requires some form of AVX"); 4654 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len); 4655 } 4656 4657 4658 // AND packed integers 4659 void Assembler::pand(XMMRegister dst, XMMRegister src) { 4660 
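  // 66 0F DB /r: bitwise AND of the two 128-bit operands.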
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4661 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66); 4662 } 4663 4664 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4665 assert(UseAVX > 0, "requires some form of AVX"); 4666 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); 4667 } 4668 4669 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4670 assert(UseAVX > 0, "requires some form of AVX"); 4671 if (VM_Version::supports_evex()) { 4672 tuple_type = EVEX_FV; 4673 input_size_in_bits = EVEX_32bit; 4674 } 4675 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); 4676 } 4677 4678 void Assembler::por(XMMRegister dst, XMMRegister src) { 4679 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4680 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66); 4681 } 4682 4683 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4684 assert(UseAVX > 0, "requires some form of AVX"); 4685 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4686 } 4687 4688 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4689 assert(UseAVX > 0, "requires some form of AVX"); 4690 if (VM_Version::supports_evex()) { 4691 tuple_type = EVEX_FV; 4692 input_size_in_bits = EVEX_32bit; 4693 } 4694 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4695 } 4696 4697 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 4698 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4699 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66); 4700 } 4701 4702 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4703 assert(UseAVX > 0, "requires some form of AVX"); 4704 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4705 } 4706 4707 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4708 assert(UseAVX > 0, "requires some form of AVX"); 4709 if (VM_Version::supports_evex()) { 4710 tuple_type = EVEX_FV; 4711 input_size_in_bits = EVEX_32bit; 4712 } 4713 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4714 } 4715 4716 4717 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4718 assert(VM_Version::supports_avx(), ""); 4719 int vector_len = AVX_256bit; 4720 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4721 emit_int8(0x18); 4722 emit_int8((unsigned char)(0xC0 | encode)); 4723 // 0x00 - insert into lower 128 bits 4724 // 0x01 - insert into upper 128 bits 4725 emit_int8(0x01); 4726 } 4727 4728 void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4729 assert(VM_Version::supports_evex(), ""); 4730 int vector_len = AVX_512bit; 4731 int src_enc = src->encoding(); 4732 int dst_enc = dst->encoding(); 4733 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 4734 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, 4735 VEX_OPCODE_0F_3A, true, vector_len, false, false); 4736 emit_int8(0x1A); 4737 emit_int8((unsigned char)(0xC0 | encode)); 4738 // 0x00 - insert into lower 256 bits 4739 // 0x01 - insert into upper 256 bits 4740 emit_int8(0x01); 4741 } 4742 4743 void Assembler::vinsertf64x4h(XMMRegister dst, Address src) { 4744 assert(VM_Version::supports_avx(), ""); 4745 if (VM_Version::supports_evex()) { 4746 tuple_type = EVEX_T4; 4747 input_size_in_bits = EVEX_64bit; 4748 } 4749 InstructionMark im(this); 4750 int vector_len = AVX_512bit; 4751 assert(dst != xnoreg, "sanity"); 4752 int dst_enc = dst->encoding(); 4753 // swap src<->dst for encoding 4754 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector_len); 4755 emit_int8(0x1A); 4756 emit_operand(dst, src); 4757 // 0x01 - insert into upper 128 bits 4758 emit_int8(0x01); 4759 } 4760 4761 void Assembler::vinsertf128h(XMMRegister dst, Address src) { 4762 assert(VM_Version::supports_avx(), ""); 4763 if (VM_Version::supports_evex()) { 4764 tuple_type = EVEX_T4; 4765 input_size_in_bits = EVEX_32bit; 4766 } 4767 InstructionMark im(this); 4768 int vector_len = AVX_256bit; 4769 assert(dst != xnoreg, "sanity"); 4770 int dst_enc = dst->encoding(); 4771 // swap src<->dst for encoding 4772 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4773 emit_int8(0x18); 4774 emit_operand(dst, src); 4775 // 0x01 - insert into upper 128 bits 4776 emit_int8(0x01); 4777 } 4778 4779 void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) { 4780 assert(VM_Version::supports_avx(), ""); 4781 int vector_len = AVX_256bit; 4782 int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4783 emit_int8(0x19); 4784 emit_int8((unsigned char)(0xC0 | encode)); 4785 // 0x00 - insert into lower 128 bits 4786 // 0x01 - insert into upper 128 bits 4787 emit_int8(0x01); 4788 } 4789 4790 void Assembler::vextractf128h(Address dst, XMMRegister src) { 4791 assert(VM_Version::supports_avx(), ""); 4792 if (VM_Version::supports_evex()) { 4793 tuple_type = EVEX_T4; 4794 input_size_in_bits = EVEX_32bit; 4795 } 4796 InstructionMark im(this); 4797 int vector_len = AVX_256bit; 4798 assert(src != xnoreg, "sanity"); 4799 int src_enc = src->encoding(); 4800 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4801 emit_int8(0x19); 4802 emit_operand(src, dst); 4803 // 0x01 - extract from upper 128 bits 4804 emit_int8(0x01); 4805 } 4806 4807 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4808 assert(VM_Version::supports_avx2(), ""); 4809 int vector_len = AVX_256bit; 4810 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4811 emit_int8(0x38); 4812 emit_int8((unsigned char)(0xC0 | encode)); 4813 // 0x00 - insert into lower 128 bits 4814 // 0x01 - insert into upper 128 bits 4815 emit_int8(0x01); 4816 } 4817 4818 void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4819 assert(VM_Version::supports_evex(), ""); 4820 int vector_len = AVX_512bit; 4821 int src_enc = src->encoding(); 4822 int dst_enc = dst->encoding(); 4823 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 4824 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4825 VM_Version::supports_avx512dq(), vector_len, false, false); 4826 emit_int8(0x38); 4827 emit_int8((unsigned char)(0xC0 | encode)); 4828 // 0x00 - insert into lower 256 bits 4829 // 0x01 - insert into upper 256 bits 4830 emit_int8(0x01); 4831 } 4832 4833 void Assembler::vinserti128h(XMMRegister dst, Address src) { 4834 assert(VM_Version::supports_avx2(), ""); 4835 if (VM_Version::supports_evex()) { 4836 tuple_type = EVEX_T4; 4837 input_size_in_bits = EVEX_32bit; 4838 } 4839 InstructionMark im(this); 4840 int vector_len = AVX_256bit; 4841 assert(dst != xnoreg, "sanity"); 4842 int dst_enc = dst->encoding(); 4843 // swap src<->dst for encoding 4844 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4845 emit_int8(0x38); 4846 emit_operand(dst, src); 4847 // 0x01 - insert into upper 128 bits 4848 emit_int8(0x01); 4849 } 4850 4851 void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) { 4852 assert(VM_Version::supports_avx(), ""); 4853 int vector_len = AVX_256bit; 4854 int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4855 emit_int8(0x39); 4856 emit_int8((unsigned char)(0xC0 | encode)); 4857 // 0x00 - insert into lower 128 bits 4858 // 0x01 - insert into upper 128 bits 4859 emit_int8(0x01); 4860 } 4861 4862 void Assembler::vextracti128h(Address dst, XMMRegister src) { 4863 assert(VM_Version::supports_avx2(), ""); 4864 if (VM_Version::supports_evex()) { 4865 tuple_type = EVEX_T4; 4866 input_size_in_bits = EVEX_32bit; 4867 } 4868 InstructionMark im(this); 4869 int vector_len = AVX_256bit; 4870 assert(src != xnoreg, "sanity"); 4871 int src_enc = src->encoding(); 4872 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4873 emit_int8(0x39); 4874 emit_operand(src, dst); 4875 // 0x01 - extract from upper 128 bits 4876 emit_int8(0x01); 4877 } 4878 4879 void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src) { 4880 assert(VM_Version::supports_evex(), ""); 4881 int vector_len = AVX_512bit; 4882 int src_enc = src->encoding(); 4883 int dst_enc = dst->encoding(); 4884 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4885 true, vector_len, false, false); 4886 emit_int8(0x3B); 4887 emit_int8((unsigned char)(0xC0 | encode)); 4888 // 0x01 - extract from upper 256 bits 4889 emit_int8(0x01); 4890 } 4891 4892 void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) { 4893 assert(VM_Version::supports_evex(), ""); 4894 int vector_len = AVX_512bit; 4895 int src_enc = src->encoding(); 4896 int dst_enc = dst->encoding(); 4897 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4898 VM_Version::supports_avx512dq(), vector_len, false, false); 4899 emit_int8(0x39); 4900 emit_int8((unsigned char)(0xC0 | encode)); 4901 // 0x01 - extract from bits 255:128 4902 // 0x02 - extract from bits 383:256 4903 // 0x03 - extract from bits 511:384 4904 emit_int8(value & 0x3); 4905 } 4906 4907 void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src) { 4908 assert(VM_Version::supports_evex(), ""); 4909 int vector_len = AVX_512bit; 4910 int src_enc = src->encoding(); 4911 int dst_enc = dst->encoding(); 4912 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4913 VM_Version::supports_avx512dq(), vector_len, false, false); 4914 emit_int8(0x1B); 4915 
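  // ModRM byte, then an imm8 selecting which 256-bit half of the source to extract.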
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x01 - extract from upper 256 bits
  emit_int8(0x01);
}

void Assembler::vextractf64x4h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  tuple_type = EVEX_T4;
  input_size_in_bits = EVEX_64bit;
  InstructionMark im(this);
  int vector_len = AVX_512bit;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
             VM_Version::supports_avx512dq(), vector_len);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x01 - extract from upper 256 bits
  emit_int8(0x01);
}

void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66,
                                     VEX_OPCODE_0F_3A, false, vector_len, false, false);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}

void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     VM_Version::supports_avx512dq(), vector_len, false, false);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}

// duplicate 4-byte integer data from src into 8 locations in dest
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x78);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_8bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x78);
  emit_operand(dst, src);
}

// duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst, xnoreg,
src, VEX_SIMD_66, 5002 vector_len, VEX_OPCODE_0F_38, false); 5003 emit_int8(0x79); 5004 emit_int8((unsigned char)(0xC0 | encode)); 5005 } 5006 5007 void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) { 5008 assert(VM_Version::supports_evex(), ""); 5009 tuple_type = EVEX_T1S; 5010 input_size_in_bits = EVEX_16bit; 5011 InstructionMark im(this); 5012 assert(dst != xnoreg, "sanity"); 5013 int dst_enc = dst->encoding(); 5014 // swap src<->dst for encoding 5015 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5016 emit_int8(0x79); 5017 emit_operand(dst, src); 5018 } 5019 5020 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5021 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { 5022 assert(VM_Version::supports_evex(), ""); 5023 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 5024 vector_len, VEX_OPCODE_0F_38, false); 5025 emit_int8(0x58); 5026 emit_int8((unsigned char)(0xC0 | encode)); 5027 } 5028 5029 void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) { 5030 assert(VM_Version::supports_evex(), ""); 5031 tuple_type = EVEX_T1S; 5032 input_size_in_bits = EVEX_32bit; 5033 InstructionMark im(this); 5034 assert(dst != xnoreg, "sanity"); 5035 int dst_enc = dst->encoding(); 5036 // swap src<->dst for encoding 5037 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5038 emit_int8(0x58); 5039 emit_operand(dst, src); 5040 } 5041 5042 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5043 void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { 5044 assert(VM_Version::supports_evex(), ""); 5045 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5046 VEX_OPCODE_0F_38, true, vector_len, false, false); 5047 emit_int8(0x59); 5048 emit_int8((unsigned char)(0xC0 | encode)); 5049 } 5050 5051 void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) { 5052 assert(VM_Version::supports_evex(), ""); 5053 tuple_type = EVEX_T1S; 5054 input_size_in_bits = EVEX_64bit; 5055 InstructionMark im(this); 5056 assert(dst != xnoreg, "sanity"); 5057 int dst_enc = dst->encoding(); 5058 // swap src<->dst for encoding 5059 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 5060 emit_int8(0x59); 5061 emit_operand(dst, src); 5062 } 5063 5064 // duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL 5065 void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { 5066 assert(VM_Version::supports_evex(), ""); 5067 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5068 VEX_OPCODE_0F_38, false, vector_len, false, false); 5069 emit_int8(0x18); 5070 emit_int8((unsigned char)(0xC0 | encode)); 5071 } 5072 5073 void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) { 5074 assert(VM_Version::supports_evex(), ""); 5075 tuple_type = EVEX_T1S; 5076 input_size_in_bits = EVEX_32bit; 5077 InstructionMark im(this); 5078 assert(dst != xnoreg, "sanity"); 5079 int dst_enc = dst->encoding(); 5080 // swap src<->dst for encoding 5081 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5082 emit_int8(0x18); 5083 emit_operand(dst, src); 5084 } 5085 5086 // duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL 5087 void 
Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { 5088 assert(VM_Version::supports_evex(), ""); 5089 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5090 VEX_OPCODE_0F_38, true, vector_len, false, false); 5091 emit_int8(0x19); 5092 emit_int8((unsigned char)(0xC0 | encode)); 5093 } 5094 5095 void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) { 5096 assert(VM_Version::supports_evex(), ""); 5097 tuple_type = EVEX_T1S; 5098 input_size_in_bits = EVEX_64bit; 5099 InstructionMark im(this); 5100 assert(dst != xnoreg, "sanity"); 5101 int dst_enc = dst->encoding(); 5102 // swap src<->dst for encoding 5103 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 5104 emit_int8(0x19); 5105 emit_operand(dst, src); 5106 } 5107 5108 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL 5109 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { 5110 assert(VM_Version::supports_evex(), ""); 5111 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5112 VEX_OPCODE_0F_38, false, vector_len, false, false); 5113 emit_int8(0x7A); 5114 emit_int8((unsigned char)(0xC0 | encode)); 5115 } 5116 5117 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL 5118 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { 5119 assert(VM_Version::supports_evex(), ""); 5120 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5121 VEX_OPCODE_0F_38, false, vector_len, false, false); 5122 emit_int8(0x7B); 5123 emit_int8((unsigned char)(0xC0 | encode)); 5124 } 5125 5126 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5127 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { 5128 assert(VM_Version::supports_evex(), ""); 5129 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5130 VEX_OPCODE_0F_38, false, vector_len, false, false); 5131 emit_int8(0x7C); 5132 emit_int8((unsigned char)(0xC0 | encode)); 5133 } 5134 5135 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5136 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { 5137 assert(VM_Version::supports_evex(), ""); 5138 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5139 VEX_OPCODE_0F_38, true, vector_len, false, false); 5140 emit_int8(0x7C); 5141 emit_int8((unsigned char)(0xC0 | encode)); 5142 } 5143 5144 // Carry-Less Multiplication Quadword 5145 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { 5146 assert(VM_Version::supports_clmul(), ""); 5147 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 5148 VEX_OPCODE_0F_3A, false, AVX_128bit, true); 5149 emit_int8(0x44); 5150 emit_int8((unsigned char)(0xC0 | encode)); 5151 emit_int8((unsigned char)mask); 5152 } 5153 5154 // Carry-Less Multiplication Quadword 5155 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 5156 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 5157 int vector_len = AVX_128bit; 5158 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 5159 vector_len, VEX_OPCODE_0F_3A, true); 5160 emit_int8(0x44); 5161 emit_int8((unsigned char)(0xC0 | encode)); 5162 emit_int8((unsigned 
char)mask); 5163 } 5164 5165 void Assembler::vzeroupper() { 5166 assert(VM_Version::supports_avx(), ""); 5167 if (UseAVX < 3) 5168 { 5169 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE); 5170 emit_int8(0x77); 5171 } 5172 } 5173 5174 5175 #ifndef _LP64 5176 // 32bit only pieces of the assembler 5177 5178 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { 5179 // NO PREFIX AS NEVER 64BIT 5180 InstructionMark im(this); 5181 emit_int8((unsigned char)0x81); 5182 emit_int8((unsigned char)(0xF8 | src1->encoding())); 5183 emit_data(imm32, rspec, 0); 5184 } 5185 5186 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { 5187 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs 5188 InstructionMark im(this); 5189 emit_int8((unsigned char)0x81); 5190 emit_operand(rdi, src1); 5191 emit_data(imm32, rspec, 0); 5192 } 5193 5194 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, 5195 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded 5196 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. 5197 void Assembler::cmpxchg8(Address adr) { 5198 InstructionMark im(this); 5199 emit_int8(0x0F); 5200 emit_int8((unsigned char)0xC7); 5201 emit_operand(rcx, adr); 5202 } 5203 5204 void Assembler::decl(Register dst) { 5205 // Don't use it directly. Use MacroAssembler::decrementl() instead. 5206 emit_int8(0x48 | dst->encoding()); 5207 } 5208 5209 #endif // _LP64 5210 5211 // 64bit typically doesn't use the x87 but needs to for the trig funcs 5212 5213 void Assembler::fabs() { 5214 emit_int8((unsigned char)0xD9); 5215 emit_int8((unsigned char)0xE1); 5216 } 5217 5218 void Assembler::fadd(int i) { 5219 emit_farith(0xD8, 0xC0, i); 5220 } 5221 5222 void Assembler::fadd_d(Address src) { 5223 InstructionMark im(this); 5224 emit_int8((unsigned char)0xDC); 5225 emit_operand32(rax, src); 5226 } 5227 5228 void Assembler::fadd_s(Address src) { 5229 InstructionMark im(this); 5230 emit_int8((unsigned char)0xD8); 5231 emit_operand32(rax, src); 5232 } 5233 5234 void Assembler::fadda(int i) { 5235 emit_farith(0xDC, 0xC0, i); 5236 } 5237 5238 void Assembler::faddp(int i) { 5239 emit_farith(0xDE, 0xC0, i); 5240 } 5241 5242 void Assembler::fchs() { 5243 emit_int8((unsigned char)0xD9); 5244 emit_int8((unsigned char)0xE0); 5245 } 5246 5247 void Assembler::fcom(int i) { 5248 emit_farith(0xD8, 0xD0, i); 5249 } 5250 5251 void Assembler::fcomp(int i) { 5252 emit_farith(0xD8, 0xD8, i); 5253 } 5254 5255 void Assembler::fcomp_d(Address src) { 5256 InstructionMark im(this); 5257 emit_int8((unsigned char)0xDC); 5258 emit_operand32(rbx, src); 5259 } 5260 5261 void Assembler::fcomp_s(Address src) { 5262 InstructionMark im(this); 5263 emit_int8((unsigned char)0xD8); 5264 emit_operand32(rbx, src); 5265 } 5266 5267 void Assembler::fcompp() { 5268 emit_int8((unsigned char)0xDE); 5269 emit_int8((unsigned char)0xD9); 5270 } 5271 5272 void Assembler::fcos() { 5273 emit_int8((unsigned char)0xD9); 5274 emit_int8((unsigned char)0xFF); 5275 } 5276 5277 void Assembler::fdecstp() { 5278 emit_int8((unsigned char)0xD9); 5279 emit_int8((unsigned char)0xF6); 5280 } 5281 5282 void Assembler::fdiv(int i) { 5283 emit_farith(0xD8, 0xF0, i); 5284 } 5285 5286 void Assembler::fdiv_d(Address src) { 5287 InstructionMark im(this); 5288 emit_int8((unsigned char)0xDC); 5289 emit_operand32(rsi, src); 5290 } 5291 5292 void Assembler::fdiv_s(Address src) { 5293 
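  // Note (added): the Register arguments handed to emit_operand32() in these x87
  // emitters (rax, rbx, rsi, rdi, rbp, rsp) are not real operands; their encodings
  // supply the /digit opcode-extension field of the ModRM byte. Here rsi (encoding 6)
  // selects FDIV m32fp, i.e. D8 /6.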
InstructionMark im(this); 5294 emit_int8((unsigned char)0xD8); 5295 emit_operand32(rsi, src); 5296 } 5297 5298 void Assembler::fdiva(int i) { 5299 emit_farith(0xDC, 0xF8, i); 5300 } 5301 5302 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994) 5303 // is erroneous for some of the floating-point instructions below. 5304 5305 void Assembler::fdivp(int i) { 5306 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) 5307 } 5308 5309 void Assembler::fdivr(int i) { 5310 emit_farith(0xD8, 0xF8, i); 5311 } 5312 5313 void Assembler::fdivr_d(Address src) { 5314 InstructionMark im(this); 5315 emit_int8((unsigned char)0xDC); 5316 emit_operand32(rdi, src); 5317 } 5318 5319 void Assembler::fdivr_s(Address src) { 5320 InstructionMark im(this); 5321 emit_int8((unsigned char)0xD8); 5322 emit_operand32(rdi, src); 5323 } 5324 5325 void Assembler::fdivra(int i) { 5326 emit_farith(0xDC, 0xF0, i); 5327 } 5328 5329 void Assembler::fdivrp(int i) { 5330 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) 5331 } 5332 5333 void Assembler::ffree(int i) { 5334 emit_farith(0xDD, 0xC0, i); 5335 } 5336 5337 void Assembler::fild_d(Address adr) { 5338 InstructionMark im(this); 5339 emit_int8((unsigned char)0xDF); 5340 emit_operand32(rbp, adr); 5341 } 5342 5343 void Assembler::fild_s(Address adr) { 5344 InstructionMark im(this); 5345 emit_int8((unsigned char)0xDB); 5346 emit_operand32(rax, adr); 5347 } 5348 5349 void Assembler::fincstp() { 5350 emit_int8((unsigned char)0xD9); 5351 emit_int8((unsigned char)0xF7); 5352 } 5353 5354 void Assembler::finit() { 5355 emit_int8((unsigned char)0x9B); 5356 emit_int8((unsigned char)0xDB); 5357 emit_int8((unsigned char)0xE3); 5358 } 5359 5360 void Assembler::fist_s(Address adr) { 5361 InstructionMark im(this); 5362 emit_int8((unsigned char)0xDB); 5363 emit_operand32(rdx, adr); 5364 } 5365 5366 void Assembler::fistp_d(Address adr) { 5367 InstructionMark im(this); 5368 emit_int8((unsigned char)0xDF); 5369 emit_operand32(rdi, adr); 5370 } 5371 5372 void Assembler::fistp_s(Address adr) { 5373 InstructionMark im(this); 5374 emit_int8((unsigned char)0xDB); 5375 emit_operand32(rbx, adr); 5376 } 5377 5378 void Assembler::fld1() { 5379 emit_int8((unsigned char)0xD9); 5380 emit_int8((unsigned char)0xE8); 5381 } 5382 5383 void Assembler::fld_d(Address adr) { 5384 InstructionMark im(this); 5385 emit_int8((unsigned char)0xDD); 5386 emit_operand32(rax, adr); 5387 } 5388 5389 void Assembler::fld_s(Address adr) { 5390 InstructionMark im(this); 5391 emit_int8((unsigned char)0xD9); 5392 emit_operand32(rax, adr); 5393 } 5394 5395 5396 void Assembler::fld_s(int index) { 5397 emit_farith(0xD9, 0xC0, index); 5398 } 5399 5400 void Assembler::fld_x(Address adr) { 5401 InstructionMark im(this); 5402 emit_int8((unsigned char)0xDB); 5403 emit_operand32(rbp, adr); 5404 } 5405 5406 void Assembler::fldcw(Address src) { 5407 InstructionMark im(this); 5408 emit_int8((unsigned char)0xD9); 5409 emit_operand32(rbp, src); 5410 } 5411 5412 void Assembler::fldenv(Address src) { 5413 InstructionMark im(this); 5414 emit_int8((unsigned char)0xD9); 5415 emit_operand32(rsp, src); 5416 } 5417 5418 void Assembler::fldlg2() { 5419 emit_int8((unsigned char)0xD9); 5420 emit_int8((unsigned char)0xEC); 5421 } 5422 5423 void Assembler::fldln2() { 5424 emit_int8((unsigned char)0xD9); 5425 emit_int8((unsigned char)0xED); 5426 } 5427 5428 void Assembler::fldz() { 5429 emit_int8((unsigned char)0xD9); 5430 emit_int8((unsigned char)0xEE); 5431 } 5432 5433 void 
Assembler::flog() { 5434 fldln2(); 5435 fxch(); 5436 fyl2x(); 5437 } 5438 5439 void Assembler::flog10() { 5440 fldlg2(); 5441 fxch(); 5442 fyl2x(); 5443 } 5444 5445 void Assembler::fmul(int i) { 5446 emit_farith(0xD8, 0xC8, i); 5447 } 5448 5449 void Assembler::fmul_d(Address src) { 5450 InstructionMark im(this); 5451 emit_int8((unsigned char)0xDC); 5452 emit_operand32(rcx, src); 5453 } 5454 5455 void Assembler::fmul_s(Address src) { 5456 InstructionMark im(this); 5457 emit_int8((unsigned char)0xD8); 5458 emit_operand32(rcx, src); 5459 } 5460 5461 void Assembler::fmula(int i) { 5462 emit_farith(0xDC, 0xC8, i); 5463 } 5464 5465 void Assembler::fmulp(int i) { 5466 emit_farith(0xDE, 0xC8, i); 5467 } 5468 5469 void Assembler::fnsave(Address dst) { 5470 InstructionMark im(this); 5471 emit_int8((unsigned char)0xDD); 5472 emit_operand32(rsi, dst); 5473 } 5474 5475 void Assembler::fnstcw(Address src) { 5476 InstructionMark im(this); 5477 emit_int8((unsigned char)0x9B); 5478 emit_int8((unsigned char)0xD9); 5479 emit_operand32(rdi, src); 5480 } 5481 5482 void Assembler::fnstsw_ax() { 5483 emit_int8((unsigned char)0xDF); 5484 emit_int8((unsigned char)0xE0); 5485 } 5486 5487 void Assembler::fprem() { 5488 emit_int8((unsigned char)0xD9); 5489 emit_int8((unsigned char)0xF8); 5490 } 5491 5492 void Assembler::fprem1() { 5493 emit_int8((unsigned char)0xD9); 5494 emit_int8((unsigned char)0xF5); 5495 } 5496 5497 void Assembler::frstor(Address src) { 5498 InstructionMark im(this); 5499 emit_int8((unsigned char)0xDD); 5500 emit_operand32(rsp, src); 5501 } 5502 5503 void Assembler::fsin() { 5504 emit_int8((unsigned char)0xD9); 5505 emit_int8((unsigned char)0xFE); 5506 } 5507 5508 void Assembler::fsqrt() { 5509 emit_int8((unsigned char)0xD9); 5510 emit_int8((unsigned char)0xFA); 5511 } 5512 5513 void Assembler::fst_d(Address adr) { 5514 InstructionMark im(this); 5515 emit_int8((unsigned char)0xDD); 5516 emit_operand32(rdx, adr); 5517 } 5518 5519 void Assembler::fst_s(Address adr) { 5520 InstructionMark im(this); 5521 emit_int8((unsigned char)0xD9); 5522 emit_operand32(rdx, adr); 5523 } 5524 5525 void Assembler::fstp_d(Address adr) { 5526 InstructionMark im(this); 5527 emit_int8((unsigned char)0xDD); 5528 emit_operand32(rbx, adr); 5529 } 5530 5531 void Assembler::fstp_d(int index) { 5532 emit_farith(0xDD, 0xD8, index); 5533 } 5534 5535 void Assembler::fstp_s(Address adr) { 5536 InstructionMark im(this); 5537 emit_int8((unsigned char)0xD9); 5538 emit_operand32(rbx, adr); 5539 } 5540 5541 void Assembler::fstp_x(Address adr) { 5542 InstructionMark im(this); 5543 emit_int8((unsigned char)0xDB); 5544 emit_operand32(rdi, adr); 5545 } 5546 5547 void Assembler::fsub(int i) { 5548 emit_farith(0xD8, 0xE0, i); 5549 } 5550 5551 void Assembler::fsub_d(Address src) { 5552 InstructionMark im(this); 5553 emit_int8((unsigned char)0xDC); 5554 emit_operand32(rsp, src); 5555 } 5556 5557 void Assembler::fsub_s(Address src) { 5558 InstructionMark im(this); 5559 emit_int8((unsigned char)0xD8); 5560 emit_operand32(rsp, src); 5561 } 5562 5563 void Assembler::fsuba(int i) { 5564 emit_farith(0xDC, 0xE8, i); 5565 } 5566 5567 void Assembler::fsubp(int i) { 5568 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) 5569 } 5570 5571 void Assembler::fsubr(int i) { 5572 emit_farith(0xD8, 0xE8, i); 5573 } 5574 5575 void Assembler::fsubr_d(Address src) { 5576 InstructionMark im(this); 5577 emit_int8((unsigned char)0xDC); 5578 emit_operand32(rbp, src); 5579 } 5580 5581 void Assembler::fsubr_s(Address src) { 5582 
InstructionMark im(this); 5583 emit_int8((unsigned char)0xD8); 5584 emit_operand32(rbp, src); 5585 } 5586 5587 void Assembler::fsubra(int i) { 5588 emit_farith(0xDC, 0xE0, i); 5589 } 5590 5591 void Assembler::fsubrp(int i) { 5592 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) 5593 } 5594 5595 void Assembler::ftan() { 5596 emit_int8((unsigned char)0xD9); 5597 emit_int8((unsigned char)0xF2); 5598 emit_int8((unsigned char)0xDD); 5599 emit_int8((unsigned char)0xD8); 5600 } 5601 5602 void Assembler::ftst() { 5603 emit_int8((unsigned char)0xD9); 5604 emit_int8((unsigned char)0xE4); 5605 } 5606 5607 void Assembler::fucomi(int i) { 5608 // make sure the instruction is supported (introduced for P6, together with cmov) 5609 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 5610 emit_farith(0xDB, 0xE8, i); 5611 } 5612 5613 void Assembler::fucomip(int i) { 5614 // make sure the instruction is supported (introduced for P6, together with cmov) 5615 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 5616 emit_farith(0xDF, 0xE8, i); 5617 } 5618 5619 void Assembler::fwait() { 5620 emit_int8((unsigned char)0x9B); 5621 } 5622 5623 void Assembler::fxch(int i) { 5624 emit_farith(0xD9, 0xC8, i); 5625 } 5626 5627 void Assembler::fyl2x() { 5628 emit_int8((unsigned char)0xD9); 5629 emit_int8((unsigned char)0xF1); 5630 } 5631 5632 void Assembler::frndint() { 5633 emit_int8((unsigned char)0xD9); 5634 emit_int8((unsigned char)0xFC); 5635 } 5636 5637 void Assembler::f2xm1() { 5638 emit_int8((unsigned char)0xD9); 5639 emit_int8((unsigned char)0xF0); 5640 } 5641 5642 void Assembler::fldl2e() { 5643 emit_int8((unsigned char)0xD9); 5644 emit_int8((unsigned char)0xEA); 5645 } 5646 5647 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. 5648 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 }; 5649 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding. 5650 static int simd_opc[4] = { 0, 0, 0x38, 0x3A }; 5651 5652 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding. 5653 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 5654 if (pre > 0) { 5655 emit_int8(simd_pre[pre]); 5656 } 5657 if (rex_w) { 5658 prefixq(adr, xreg); 5659 } else { 5660 prefix(adr, xreg); 5661 } 5662 if (opc > 0) { 5663 emit_int8(0x0F); 5664 int opc2 = simd_opc[opc]; 5665 if (opc2 > 0) { 5666 emit_int8(opc2); 5667 } 5668 } 5669 } 5670 5671 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 5672 if (pre > 0) { 5673 emit_int8(simd_pre[pre]); 5674 } 5675 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : 5676 prefix_and_encode(dst_enc, src_enc); 5677 if (opc > 0) { 5678 emit_int8(0x0F); 5679 int opc2 = simd_opc[opc]; 5680 if (opc2 > 0) { 5681 emit_int8(opc2); 5682 } 5683 } 5684 return encode; 5685 } 5686 5687 5688 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, int vector_len) { 5689 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { 5690 prefix(VEX_3bytes); 5691 5692 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); 5693 byte1 = (~byte1) & 0xE0; 5694 byte1 |= opc; 5695 emit_int8(byte1); 5696 5697 int byte2 = ((~nds_enc) & 0xf) << 3; 5698 byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 
    4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0) ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v,
                            int nds_enc, VexSimdPrefix pre, VexOpcode opc,
                            bool is_extended_context, bool is_merge_context,
                            int vector_len, bool no_mask_reg) {
  // EVEX 0x62 prefix
  prefix(EVEX_4bytes);
  evex_encoding = (vex_w ? VEX_W : 0) | (evex_r ? EVEX_Rb : 0);

  // P0: byte 2, initialized to RXBR`00mm
  // instead of not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;
  emit_int8(byte2);

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;
  emit_int8(byte3);

  // P2: byte 4 as zL'Lbv'aaa
  int byte4 = (no_mask_reg) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (is_extended_context ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((vector_len) & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  byte4 |= (is_merge_context ? EVEX_Z : 0);
  emit_int8(byte4);
}

void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre,
                           VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
  bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0;
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  avx_vector_len = vector_len;

  // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
  if (VM_Version::supports_avx512vl() == false) {
    switch (vector_len) {
    case AVX_128bit:
    case AVX_256bit:
      legacy_mode = true;
      break;
    }
  }

  if ((UseAVX > 2) && (legacy_mode == false)) {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    is_evex_instruction = true;
    evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
  } else {
    vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len);
  }
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
                                     bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
  bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0;
  bool vex_b = ((src_enc & 8) == 8) ?
1 : 0; 5784 bool vex_x = false; 5785 avx_vector_len = vector_len; 5786 5787 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit 5788 if (VM_Version::supports_avx512vl() == false) { 5789 switch (vector_len) { 5790 case AVX_128bit: 5791 case AVX_256bit: 5792 legacy_mode = true; 5793 break; 5794 } 5795 } 5796 5797 if ((UseAVX > 2) && (legacy_mode == false)) 5798 { 5799 bool evex_r = (dst_enc >= 16); 5800 bool evex_v = (nds_enc >= 16); 5801 // can use vex_x as bank extender on rm encoding 5802 vex_x = (src_enc >= 16); 5803 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg); 5804 } else { 5805 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len); 5806 } 5807 5808 // return modrm byte components for operands 5809 return (((dst_enc & 7) << 3) | (src_enc & 7)); 5810 } 5811 5812 5813 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, 5814 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) { 5815 if (UseAVX > 0) { 5816 int xreg_enc = xreg->encoding(); 5817 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5818 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg); 5819 } else { 5820 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding"); 5821 rex_prefix(adr, xreg, pre, opc, rex_w); 5822 } 5823 } 5824 5825 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, 5826 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) { 5827 int dst_enc = dst->encoding(); 5828 int src_enc = src->encoding(); 5829 if (UseAVX > 0) { 5830 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5831 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg); 5832 } else { 5833 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding"); 5834 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w); 5835 } 5836 } 5837 5838 int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, VexSimdPrefix pre, 5839 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) { 5840 int dst_enc = dst->encoding(); 5841 int src_enc = src->encoding(); 5842 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5843 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg); 5844 } 5845 5846 int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, VexSimdPrefix pre, 5847 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) { 5848 int dst_enc = dst->encoding(); 5849 int src_enc = src->encoding(); 5850 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 5851 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg); 5852 } 5853 5854 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5855 InstructionMark im(this); 5856 simd_prefix(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode); 5857 emit_int8(opcode); 5858 emit_operand(dst, src); 5859 } 5860 5861 void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg) { 5862 InstructionMark im(this); 5863 simd_prefix_q(dst, dst, src, pre, no_mask_reg); 5864 emit_int8(opcode); 5865 emit_operand(dst, src); 5866 } 5867 5868 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5869 int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode); 5870 emit_int8(opcode); 5871 emit_int8((unsigned char)(0xC0 | encode)); 5872 } 5873 5874 void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) { 5875 int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit); 5876 emit_int8(opcode); 5877 emit_int8((unsigned char)(0xC0 | encode)); 5878 } 5879 5880 // Versions with no second source register (non-destructive source). 5881 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) { 5882 InstructionMark im(this); 5883 simd_prefix(dst, xnoreg, src, pre, opNoRegMask); 5884 emit_int8(opcode); 5885 emit_operand(dst, src); 5886 } 5887 5888 void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) { 5889 InstructionMark im(this); 5890 simd_prefix_q(dst, xnoreg, src, pre, opNoRegMask); 5891 emit_int8(opcode); 5892 emit_operand(dst, src); 5893 } 5894 5895 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5896 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, legacy_mode, AVX_128bit); 5897 emit_int8(opcode); 5898 emit_int8((unsigned char)(0xC0 | encode)); 5899 } 5900 5901 void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) { 5902 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit); 5903 emit_int8(opcode); 5904 emit_int8((unsigned char)(0xC0 | encode)); 5905 } 5906 5907 // 3-operands AVX instructions 5908 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, Address src, 5909 VexSimdPrefix pre, int vector_len, bool no_mask_reg, bool legacy_mode) { 5910 InstructionMark im(this); 5911 vex_prefix(dst, nds, src, pre, vector_len, no_mask_reg, legacy_mode); 5912 emit_int8(opcode); 5913 emit_operand(dst, src); 5914 } 5915 5916 void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, 5917 Address src, VexSimdPrefix pre, int vector_len, bool no_mask_reg) { 5918 InstructionMark im(this); 5919 vex_prefix_q(dst, nds, src, pre, vector_len, no_mask_reg); 5920 emit_int8(opcode); 5921 emit_operand(dst, src); 5922 } 5923 5924 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, 5925 VexSimdPrefix pre, int vector_len, bool 
                               no_mask_reg, bool legacy_mode) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector_len, VEX_OPCODE_0F, false, no_mask_reg);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src,
                                 VexSimdPrefix pre, int vector_len, bool no_mask_reg) {
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative addressing;
// it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
6021 return true; 6022 } 6023 if (adr.reloc() == relocInfo::virtual_call_type || 6024 adr.reloc() == relocInfo::opt_virtual_call_type || 6025 adr.reloc() == relocInfo::static_call_type || 6026 adr.reloc() == relocInfo::static_stub_type ) { 6027 // This should be rip relative within the code cache and easily 6028 // reachable until we get huge code caches. (At which point 6029 // ic code is going to have issues). 6030 return true; 6031 } 6032 if (adr.reloc() != relocInfo::external_word_type && 6033 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special 6034 adr.reloc() != relocInfo::poll_type && // relocs to identify them 6035 adr.reloc() != relocInfo::runtime_call_type ) { 6036 return false; 6037 } 6038 6039 // Stress the correction code 6040 if (ForceUnreachable) { 6041 // Must be runtimecall reloc, see if it is in the codecache 6042 // Flipping stuff in the codecache to be unreachable causes issues 6043 // with things like inline caches where the additional instructions 6044 // are not handled. 6045 if (CodeCache::find_blob(adr._target) == NULL) { 6046 return false; 6047 } 6048 } 6049 // For external_word_type/runtime_call_type if it is reachable from where we 6050 // are now (possibly a temp buffer) and where we might end up 6051 // anywhere in the codeCache then we are always reachable. 6052 // This would have to change if we ever save/restore shared code 6053 // to be more pessimistic. 6054 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); 6055 if (!is_simm32(disp)) return false; 6056 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); 6057 if (!is_simm32(disp)) return false; 6058 6059 disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int)); 6060 6061 // Because rip relative is a disp + address_of_next_instruction and we 6062 // don't know the value of address_of_next_instruction we apply a fudge factor 6063 // to make sure we will be ok no matter the size of the instruction we get placed into. 6064 // We don't have to fudge the checks above here because they are already worst case. 6065 6066 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal 6067 // + 4 because better safe than sorry. 6068 const int fudge = 12 + 4; 6069 if (disp < 0) { 6070 disp -= fudge; 6071 } else { 6072 disp += fudge; 6073 } 6074 return is_simm32(disp); 6075 } 6076 6077 // Check if the polling page is not reachable from the code cache using rip-relative 6078 // addressing. 6079 bool Assembler::is_polling_page_far() { 6080 intptr_t addr = (intptr_t)os::get_polling_page(); 6081 return ForceUnreachable || 6082 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) || 6083 !is_simm32(addr - (intptr_t)CodeCache::high_bound()); 6084 } 6085 6086 void Assembler::emit_data64(jlong data, 6087 relocInfo::relocType rtype, 6088 int format) { 6089 if (rtype == relocInfo::none) { 6090 emit_int64(data); 6091 } else { 6092 emit_data64(data, Relocation::spec_simple(rtype), format); 6093 } 6094 } 6095 6096 void Assembler::emit_data64(jlong data, 6097 RelocationHolder const& rspec, 6098 int format) { 6099 assert(imm_operand == 0, "default format must be immediate in this file"); 6100 assert(imm_operand == format, "must be immediate"); 6101 assert(inst_mark() != NULL, "must be inside InstructionMark"); 6102 // Do not use AbstractAssembler::relocate, which is not intended for 6103 // embedded words. Instead, relocate to the enclosing instruction. 
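  // (Added note) inst_mark() is the PC recorded by the enclosing InstructionMark at the
  // first byte of the current instruction, so the relocation below is anchored at the
  // instruction start, and 'format' (imm_operand here) tells the relocation code where
  // the embedded 64-bit immediate lives within that instruction.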
6104 code_section()->relocate(inst_mark(), rspec, format); 6105 #ifdef ASSERT 6106 check_relocation(rspec, format); 6107 #endif 6108 emit_int64(data); 6109 } 6110 6111 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { 6112 if (reg_enc >= 8) { 6113 prefix(REX_B); 6114 reg_enc -= 8; 6115 } else if (byteinst && reg_enc >= 4) { 6116 prefix(REX); 6117 } 6118 return reg_enc; 6119 } 6120 6121 int Assembler::prefixq_and_encode(int reg_enc) { 6122 if (reg_enc < 8) { 6123 prefix(REX_W); 6124 } else { 6125 prefix(REX_WB); 6126 reg_enc -= 8; 6127 } 6128 return reg_enc; 6129 } 6130 6131 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { 6132 if (dst_enc < 8) { 6133 if (src_enc >= 8) { 6134 prefix(REX_B); 6135 src_enc -= 8; 6136 } else if (byteinst && src_enc >= 4) { 6137 prefix(REX); 6138 } 6139 } else { 6140 if (src_enc < 8) { 6141 prefix(REX_R); 6142 } else { 6143 prefix(REX_RB); 6144 src_enc -= 8; 6145 } 6146 dst_enc -= 8; 6147 } 6148 return dst_enc << 3 | src_enc; 6149 } 6150 6151 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { 6152 if (dst_enc < 8) { 6153 if (src_enc < 8) { 6154 prefix(REX_W); 6155 } else { 6156 prefix(REX_WB); 6157 src_enc -= 8; 6158 } 6159 } else { 6160 if (src_enc < 8) { 6161 prefix(REX_WR); 6162 } else { 6163 prefix(REX_WRB); 6164 src_enc -= 8; 6165 } 6166 dst_enc -= 8; 6167 } 6168 return dst_enc << 3 | src_enc; 6169 } 6170 6171 void Assembler::prefix(Register reg) { 6172 if (reg->encoding() >= 8) { 6173 prefix(REX_B); 6174 } 6175 } 6176 6177 void Assembler::prefix(Address adr) { 6178 if (adr.base_needs_rex()) { 6179 if (adr.index_needs_rex()) { 6180 prefix(REX_XB); 6181 } else { 6182 prefix(REX_B); 6183 } 6184 } else { 6185 if (adr.index_needs_rex()) { 6186 prefix(REX_X); 6187 } 6188 } 6189 } 6190 6191 void Assembler::prefixq(Address adr) { 6192 if (adr.base_needs_rex()) { 6193 if (adr.index_needs_rex()) { 6194 prefix(REX_WXB); 6195 } else { 6196 prefix(REX_WB); 6197 } 6198 } else { 6199 if (adr.index_needs_rex()) { 6200 prefix(REX_WX); 6201 } else { 6202 prefix(REX_W); 6203 } 6204 } 6205 } 6206 6207 6208 void Assembler::prefix(Address adr, Register reg, bool byteinst) { 6209 if (reg->encoding() < 8) { 6210 if (adr.base_needs_rex()) { 6211 if (adr.index_needs_rex()) { 6212 prefix(REX_XB); 6213 } else { 6214 prefix(REX_B); 6215 } 6216 } else { 6217 if (adr.index_needs_rex()) { 6218 prefix(REX_X); 6219 } else if (byteinst && reg->encoding() >= 4 ) { 6220 prefix(REX); 6221 } 6222 } 6223 } else { 6224 if (adr.base_needs_rex()) { 6225 if (adr.index_needs_rex()) { 6226 prefix(REX_RXB); 6227 } else { 6228 prefix(REX_RB); 6229 } 6230 } else { 6231 if (adr.index_needs_rex()) { 6232 prefix(REX_RX); 6233 } else { 6234 prefix(REX_R); 6235 } 6236 } 6237 } 6238 } 6239 6240 void Assembler::prefixq(Address adr, Register src) { 6241 if (src->encoding() < 8) { 6242 if (adr.base_needs_rex()) { 6243 if (adr.index_needs_rex()) { 6244 prefix(REX_WXB); 6245 } else { 6246 prefix(REX_WB); 6247 } 6248 } else { 6249 if (adr.index_needs_rex()) { 6250 prefix(REX_WX); 6251 } else { 6252 prefix(REX_W); 6253 } 6254 } 6255 } else { 6256 if (adr.base_needs_rex()) { 6257 if (adr.index_needs_rex()) { 6258 prefix(REX_WRXB); 6259 } else { 6260 prefix(REX_WRB); 6261 } 6262 } else { 6263 if (adr.index_needs_rex()) { 6264 prefix(REX_WRX); 6265 } else { 6266 prefix(REX_WR); 6267 } 6268 } 6269 } 6270 } 6271 6272 void Assembler::prefix(Address adr, XMMRegister reg) { 6273 if (reg->encoding() < 8) { 6274 if (adr.base_needs_rex()) { 6275 if (adr.index_needs_rex()) { 
6276 prefix(REX_XB); 6277 } else { 6278 prefix(REX_B); 6279 } 6280 } else { 6281 if (adr.index_needs_rex()) { 6282 prefix(REX_X); 6283 } 6284 } 6285 } else { 6286 if (adr.base_needs_rex()) { 6287 if (adr.index_needs_rex()) { 6288 prefix(REX_RXB); 6289 } else { 6290 prefix(REX_RB); 6291 } 6292 } else { 6293 if (adr.index_needs_rex()) { 6294 prefix(REX_RX); 6295 } else { 6296 prefix(REX_R); 6297 } 6298 } 6299 } 6300 } 6301 6302 void Assembler::prefixq(Address adr, XMMRegister src) { 6303 if (src->encoding() < 8) { 6304 if (adr.base_needs_rex()) { 6305 if (adr.index_needs_rex()) { 6306 prefix(REX_WXB); 6307 } else { 6308 prefix(REX_WB); 6309 } 6310 } else { 6311 if (adr.index_needs_rex()) { 6312 prefix(REX_WX); 6313 } else { 6314 prefix(REX_W); 6315 } 6316 } 6317 } else { 6318 if (adr.base_needs_rex()) { 6319 if (adr.index_needs_rex()) { 6320 prefix(REX_WRXB); 6321 } else { 6322 prefix(REX_WRB); 6323 } 6324 } else { 6325 if (adr.index_needs_rex()) { 6326 prefix(REX_WRX); 6327 } else { 6328 prefix(REX_WR); 6329 } 6330 } 6331 } 6332 } 6333 6334 void Assembler::adcq(Register dst, int32_t imm32) { 6335 (void) prefixq_and_encode(dst->encoding()); 6336 emit_arith(0x81, 0xD0, dst, imm32); 6337 } 6338 6339 void Assembler::adcq(Register dst, Address src) { 6340 InstructionMark im(this); 6341 prefixq(src, dst); 6342 emit_int8(0x13); 6343 emit_operand(dst, src); 6344 } 6345 6346 void Assembler::adcq(Register dst, Register src) { 6347 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6348 emit_arith(0x13, 0xC0, dst, src); 6349 } 6350 6351 void Assembler::addq(Address dst, int32_t imm32) { 6352 InstructionMark im(this); 6353 prefixq(dst); 6354 emit_arith_operand(0x81, rax, dst,imm32); 6355 } 6356 6357 void Assembler::addq(Address dst, Register src) { 6358 InstructionMark im(this); 6359 prefixq(dst, src); 6360 emit_int8(0x01); 6361 emit_operand(src, dst); 6362 } 6363 6364 void Assembler::addq(Register dst, int32_t imm32) { 6365 (void) prefixq_and_encode(dst->encoding()); 6366 emit_arith(0x81, 0xC0, dst, imm32); 6367 } 6368 6369 void Assembler::addq(Register dst, Address src) { 6370 InstructionMark im(this); 6371 prefixq(src, dst); 6372 emit_int8(0x03); 6373 emit_operand(dst, src); 6374 } 6375 6376 void Assembler::addq(Register dst, Register src) { 6377 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6378 emit_arith(0x03, 0xC0, dst, src); 6379 } 6380 6381 void Assembler::adcxq(Register dst, Register src) { 6382 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 6383 emit_int8((unsigned char)0x66); 6384 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6385 emit_int8(0x0F); 6386 emit_int8(0x38); 6387 emit_int8((unsigned char)0xF6); 6388 emit_int8((unsigned char)(0xC0 | encode)); 6389 } 6390 6391 void Assembler::adoxq(Register dst, Register src) { 6392 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 6393 emit_int8((unsigned char)0xF3); 6394 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6395 emit_int8(0x0F); 6396 emit_int8(0x38); 6397 emit_int8((unsigned char)0xF6); 6398 emit_int8((unsigned char)(0xC0 | encode)); 6399 } 6400 6401 void Assembler::andq(Address dst, int32_t imm32) { 6402 InstructionMark im(this); 6403 prefixq(dst); 6404 emit_int8((unsigned char)0x81); 6405 emit_operand(rsp, dst, 4); 6406 emit_int32(imm32); 6407 } 6408 6409 void Assembler::andq(Register dst, int32_t imm32) { 6410 (void) prefixq_and_encode(dst->encoding()); 6411 emit_arith(0x81, 0xE0, dst, imm32); 6412 } 6413 6414 void 
Assembler::andq(Register dst, Address src) { 6415 InstructionMark im(this); 6416 prefixq(src, dst); 6417 emit_int8(0x23); 6418 emit_operand(dst, src); 6419 } 6420 6421 void Assembler::andq(Register dst, Register src) { 6422 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6423 emit_arith(0x23, 0xC0, dst, src); 6424 } 6425 6426 void Assembler::andnq(Register dst, Register src1, Register src2) { 6427 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6428 int encode = vex_prefix_0F38_and_encode_q_legacy(dst, src1, src2); 6429 emit_int8((unsigned char)0xF2); 6430 emit_int8((unsigned char)(0xC0 | encode)); 6431 } 6432 6433 void Assembler::andnq(Register dst, Register src1, Address src2) { 6434 InstructionMark im(this); 6435 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6436 vex_prefix_0F38_q_legacy(dst, src1, src2); 6437 emit_int8((unsigned char)0xF2); 6438 emit_operand(dst, src2); 6439 } 6440 6441 void Assembler::bsfq(Register dst, Register src) { 6442 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6443 emit_int8(0x0F); 6444 emit_int8((unsigned char)0xBC); 6445 emit_int8((unsigned char)(0xC0 | encode)); 6446 } 6447 6448 void Assembler::bsrq(Register dst, Register src) { 6449 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6450 emit_int8(0x0F); 6451 emit_int8((unsigned char)0xBD); 6452 emit_int8((unsigned char)(0xC0 | encode)); 6453 } 6454 6455 void Assembler::bswapq(Register reg) { 6456 int encode = prefixq_and_encode(reg->encoding()); 6457 emit_int8(0x0F); 6458 emit_int8((unsigned char)(0xC8 | encode)); 6459 } 6460 6461 void Assembler::blsiq(Register dst, Register src) { 6462 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6463 int encode = vex_prefix_0F38_and_encode_q_legacy(rbx, dst, src); 6464 emit_int8((unsigned char)0xF3); 6465 emit_int8((unsigned char)(0xC0 | encode)); 6466 } 6467 6468 void Assembler::blsiq(Register dst, Address src) { 6469 InstructionMark im(this); 6470 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6471 vex_prefix_0F38_q_legacy(rbx, dst, src); 6472 emit_int8((unsigned char)0xF3); 6473 emit_operand(rbx, src); 6474 } 6475 6476 void Assembler::blsmskq(Register dst, Register src) { 6477 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6478 int encode = vex_prefix_0F38_and_encode_q_legacy(rdx, dst, src); 6479 emit_int8((unsigned char)0xF3); 6480 emit_int8((unsigned char)(0xC0 | encode)); 6481 } 6482 6483 void Assembler::blsmskq(Register dst, Address src) { 6484 InstructionMark im(this); 6485 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6486 vex_prefix_0F38_q_legacy(rdx, dst, src); 6487 emit_int8((unsigned char)0xF3); 6488 emit_operand(rdx, src); 6489 } 6490 6491 void Assembler::blsrq(Register dst, Register src) { 6492 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6493 int encode = vex_prefix_0F38_and_encode_q_legacy(rcx, dst, src); 6494 emit_int8((unsigned char)0xF3); 6495 emit_int8((unsigned char)(0xC0 | encode)); 6496 } 6497 6498 void Assembler::blsrq(Register dst, Address src) { 6499 InstructionMark im(this); 6500 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6501 vex_prefix_0F38_q_legacy(rcx, dst, src); 6502 emit_int8((unsigned char)0xF3); 6503 emit_operand(rcx, src); 6504 } 6505 6506 void Assembler::cdqq() { 6507 prefix(REX_W); 
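  // With REX.W, opcode 0x99 is CQO: sign-extend RAX into RDX:RAX. The emitted bytes
  // are 48 99; without the REX.W prefix the same opcode is the 32-bit CDQ.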
6508 emit_int8((unsigned char)0x99); 6509 } 6510 6511 void Assembler::clflush(Address adr) { 6512 prefix(adr); 6513 emit_int8(0x0F); 6514 emit_int8((unsigned char)0xAE); 6515 emit_operand(rdi, adr); 6516 } 6517 6518 void Assembler::cmovq(Condition cc, Register dst, Register src) { 6519 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6520 emit_int8(0x0F); 6521 emit_int8(0x40 | cc); 6522 emit_int8((unsigned char)(0xC0 | encode)); 6523 } 6524 6525 void Assembler::cmovq(Condition cc, Register dst, Address src) { 6526 InstructionMark im(this); 6527 prefixq(src, dst); 6528 emit_int8(0x0F); 6529 emit_int8(0x40 | cc); 6530 emit_operand(dst, src); 6531 } 6532 6533 void Assembler::cmpq(Address dst, int32_t imm32) { 6534 InstructionMark im(this); 6535 prefixq(dst); 6536 emit_int8((unsigned char)0x81); 6537 emit_operand(rdi, dst, 4); 6538 emit_int32(imm32); 6539 } 6540 6541 void Assembler::cmpq(Register dst, int32_t imm32) { 6542 (void) prefixq_and_encode(dst->encoding()); 6543 emit_arith(0x81, 0xF8, dst, imm32); 6544 } 6545 6546 void Assembler::cmpq(Address dst, Register src) { 6547 InstructionMark im(this); 6548 prefixq(dst, src); 6549 emit_int8(0x3B); 6550 emit_operand(src, dst); 6551 } 6552 6553 void Assembler::cmpq(Register dst, Register src) { 6554 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6555 emit_arith(0x3B, 0xC0, dst, src); 6556 } 6557 6558 void Assembler::cmpq(Register dst, Address src) { 6559 InstructionMark im(this); 6560 prefixq(src, dst); 6561 emit_int8(0x3B); 6562 emit_operand(dst, src); 6563 } 6564 6565 void Assembler::cmpxchgq(Register reg, Address adr) { 6566 InstructionMark im(this); 6567 prefixq(adr, reg); 6568 emit_int8(0x0F); 6569 emit_int8((unsigned char)0xB1); 6570 emit_operand(reg, adr); 6571 } 6572 6573 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { 6574 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6575 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true); 6576 emit_int8(0x2A); 6577 emit_int8((unsigned char)(0xC0 | encode)); 6578 } 6579 6580 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { 6581 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6582 if (VM_Version::supports_evex()) { 6583 tuple_type = EVEX_T1S; 6584 input_size_in_bits = EVEX_32bit; 6585 } 6586 InstructionMark im(this); 6587 simd_prefix_q(dst, dst, src, VEX_SIMD_F2, true); 6588 emit_int8(0x2A); 6589 emit_operand(dst, src); 6590 } 6591 6592 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 6593 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6594 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3, true); 6595 emit_int8(0x2A); 6596 emit_int8((unsigned char)(0xC0 | encode)); 6597 } 6598 6599 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) { 6600 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6601 if (VM_Version::supports_evex()) { 6602 tuple_type = EVEX_T1S; 6603 input_size_in_bits = EVEX_32bit; 6604 } 6605 InstructionMark im(this); 6606 simd_prefix_q(dst, dst, src, VEX_SIMD_F3, true); 6607 emit_int8(0x2A); 6608 emit_operand(dst, src); 6609 } 6610 6611 void Assembler::cvttsd2siq(Register dst, XMMRegister src) { 6612 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6613 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true); 6614 emit_int8(0x2C); 6615 emit_int8((unsigned char)(0xC0 | encode)); 6616 } 6617 6618 void Assembler::cvttss2siq(Register dst, XMMRegister src) { 6619 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6620 int encode = 
      simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8(0xC8 | encode);
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
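  // (Added note) INC m64 is FF /0; the rax passed to emit_operand() below only supplies
  // the /0 opcode-extension digit of the ModRM byte, it is not a source operand.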
6715 InstructionMark im(this); 6716 prefixq(dst); 6717 emit_int8((unsigned char)0xFF); 6718 emit_operand(rax, dst); 6719 } 6720 6721 void Assembler::lea(Register dst, Address src) { 6722 leaq(dst, src); 6723 } 6724 6725 void Assembler::leaq(Register dst, Address src) { 6726 InstructionMark im(this); 6727 prefixq(src, dst); 6728 emit_int8((unsigned char)0x8D); 6729 emit_operand(dst, src); 6730 } 6731 6732 void Assembler::mov64(Register dst, int64_t imm64) { 6733 InstructionMark im(this); 6734 int encode = prefixq_and_encode(dst->encoding()); 6735 emit_int8((unsigned char)(0xB8 | encode)); 6736 emit_int64(imm64); 6737 } 6738 6739 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { 6740 InstructionMark im(this); 6741 int encode = prefixq_and_encode(dst->encoding()); 6742 emit_int8(0xB8 | encode); 6743 emit_data64(imm64, rspec); 6744 } 6745 6746 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) { 6747 InstructionMark im(this); 6748 int encode = prefix_and_encode(dst->encoding()); 6749 emit_int8((unsigned char)(0xB8 | encode)); 6750 emit_data((int)imm32, rspec, narrow_oop_operand); 6751 } 6752 6753 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) { 6754 InstructionMark im(this); 6755 prefix(dst); 6756 emit_int8((unsigned char)0xC7); 6757 emit_operand(rax, dst, 4); 6758 emit_data((int)imm32, rspec, narrow_oop_operand); 6759 } 6760 6761 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { 6762 InstructionMark im(this); 6763 int encode = prefix_and_encode(src1->encoding()); 6764 emit_int8((unsigned char)0x81); 6765 emit_int8((unsigned char)(0xF8 | encode)); 6766 emit_data((int)imm32, rspec, narrow_oop_operand); 6767 } 6768 6769 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { 6770 InstructionMark im(this); 6771 prefix(src1); 6772 emit_int8((unsigned char)0x81); 6773 emit_operand(rax, src1, 4); 6774 emit_data((int)imm32, rspec, narrow_oop_operand); 6775 } 6776 6777 void Assembler::lzcntq(Register dst, Register src) { 6778 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 6779 emit_int8((unsigned char)0xF3); 6780 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6781 emit_int8(0x0F); 6782 emit_int8((unsigned char)0xBD); 6783 emit_int8((unsigned char)(0xC0 | encode)); 6784 } 6785 6786 void Assembler::movdq(XMMRegister dst, Register src) { 6787 // table D-1 says MMX/SSE2 6788 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6789 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66, true); 6790 emit_int8(0x6E); 6791 emit_int8((unsigned char)(0xC0 | encode)); 6792 } 6793 6794 void Assembler::movdq(Register dst, XMMRegister src) { 6795 // table D-1 says MMX/SSE2 6796 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6797 // swap src/dst to get correct prefix 6798 int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66, true); 6799 emit_int8(0x7E); 6800 emit_int8((unsigned char)(0xC0 | encode)); 6801 } 6802 6803 void Assembler::movq(Register dst, Register src) { 6804 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6805 emit_int8((unsigned char)0x8B); 6806 emit_int8((unsigned char)(0xC0 | encode)); 6807 } 6808 6809 void Assembler::movq(Register dst, Address src) { 6810 InstructionMark im(this); 6811 prefixq(src, dst); 6812 emit_int8((unsigned char)0x8B); 6813 emit_operand(dst, src); 6814 } 6815 6816 void Assembler::movq(Address dst, 
Register src) { 6817 InstructionMark im(this); 6818 prefixq(dst, src); 6819 emit_int8((unsigned char)0x89); 6820 emit_operand(src, dst); 6821 } 6822 6823 void Assembler::movsbq(Register dst, Address src) { 6824 InstructionMark im(this); 6825 prefixq(src, dst); 6826 emit_int8(0x0F); 6827 emit_int8((unsigned char)0xBE); 6828 emit_operand(dst, src); 6829 } 6830 6831 void Assembler::movsbq(Register dst, Register src) { 6832 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6833 emit_int8(0x0F); 6834 emit_int8((unsigned char)0xBE); 6835 emit_int8((unsigned char)(0xC0 | encode)); 6836 } 6837 6838 void Assembler::movslq(Register dst, int32_t imm32) { 6839 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) 6840 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) 6841 // as a result we shouldn't use until tested at runtime... 6842 ShouldNotReachHere(); 6843 InstructionMark im(this); 6844 int encode = prefixq_and_encode(dst->encoding()); 6845 emit_int8((unsigned char)(0xC7 | encode)); 6846 emit_int32(imm32); 6847 } 6848 6849 void Assembler::movslq(Address dst, int32_t imm32) { 6850 assert(is_simm32(imm32), "lost bits"); 6851 InstructionMark im(this); 6852 prefixq(dst); 6853 emit_int8((unsigned char)0xC7); 6854 emit_operand(rax, dst, 4); 6855 emit_int32(imm32); 6856 } 6857 6858 void Assembler::movslq(Register dst, Address src) { 6859 InstructionMark im(this); 6860 prefixq(src, dst); 6861 emit_int8(0x63); 6862 emit_operand(dst, src); 6863 } 6864 6865 void Assembler::movslq(Register dst, Register src) { 6866 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6867 emit_int8(0x63); 6868 emit_int8((unsigned char)(0xC0 | encode)); 6869 } 6870 6871 void Assembler::movswq(Register dst, Address src) { 6872 InstructionMark im(this); 6873 prefixq(src, dst); 6874 emit_int8(0x0F); 6875 emit_int8((unsigned char)0xBF); 6876 emit_operand(dst, src); 6877 } 6878 6879 void Assembler::movswq(Register dst, Register src) { 6880 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6881 emit_int8((unsigned char)0x0F); 6882 emit_int8((unsigned char)0xBF); 6883 emit_int8((unsigned char)(0xC0 | encode)); 6884 } 6885 6886 void Assembler::movzbq(Register dst, Address src) { 6887 InstructionMark im(this); 6888 prefixq(src, dst); 6889 emit_int8((unsigned char)0x0F); 6890 emit_int8((unsigned char)0xB6); 6891 emit_operand(dst, src); 6892 } 6893 6894 void Assembler::movzbq(Register dst, Register src) { 6895 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6896 emit_int8(0x0F); 6897 emit_int8((unsigned char)0xB6); 6898 emit_int8(0xC0 | encode); 6899 } 6900 6901 void Assembler::movzwq(Register dst, Address src) { 6902 InstructionMark im(this); 6903 prefixq(src, dst); 6904 emit_int8((unsigned char)0x0F); 6905 emit_int8((unsigned char)0xB7); 6906 emit_operand(dst, src); 6907 } 6908 6909 void Assembler::movzwq(Register dst, Register src) { 6910 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6911 emit_int8((unsigned char)0x0F); 6912 emit_int8((unsigned char)0xB7); 6913 emit_int8((unsigned char)(0xC0 | encode)); 6914 } 6915 6916 void Assembler::mulq(Address src) { 6917 InstructionMark im(this); 6918 prefixq(src); 6919 emit_int8((unsigned char)0xF7); 6920 emit_operand(rsp, src); 6921 } 6922 6923 void Assembler::mulq(Register src) { 6924 int encode = prefixq_and_encode(src->encoding()); 6925 emit_int8((unsigned char)0xF7); 6926 emit_int8((unsigned char)(0xE0 | encode)); 6927 } 6928 6929 void Assembler::mulxq(Register dst1, Register dst2, 
Register src) { 6930 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); 6931 int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), 6932 VEX_SIMD_F2, VEX_OPCODE_0F_38, true, AVX_128bit, true, false); 6933 emit_int8((unsigned char)0xF6); 6934 emit_int8((unsigned char)(0xC0 | encode)); 6935 } 6936 6937 void Assembler::negq(Register dst) { 6938 int encode = prefixq_and_encode(dst->encoding()); 6939 emit_int8((unsigned char)0xF7); 6940 emit_int8((unsigned char)(0xD8 | encode)); 6941 } 6942 6943 void Assembler::notq(Register dst) { 6944 int encode = prefixq_and_encode(dst->encoding()); 6945 emit_int8((unsigned char)0xF7); 6946 emit_int8((unsigned char)(0xD0 | encode)); 6947 } 6948 6949 void Assembler::orq(Address dst, int32_t imm32) { 6950 InstructionMark im(this); 6951 prefixq(dst); 6952 emit_int8((unsigned char)0x81); 6953 emit_operand(rcx, dst, 4); 6954 emit_int32(imm32); 6955 } 6956 6957 void Assembler::orq(Register dst, int32_t imm32) { 6958 (void) prefixq_and_encode(dst->encoding()); 6959 emit_arith(0x81, 0xC8, dst, imm32); 6960 } 6961 6962 void Assembler::orq(Register dst, Address src) { 6963 InstructionMark im(this); 6964 prefixq(src, dst); 6965 emit_int8(0x0B); 6966 emit_operand(dst, src); 6967 } 6968 6969 void Assembler::orq(Register dst, Register src) { 6970 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6971 emit_arith(0x0B, 0xC0, dst, src); 6972 } 6973 6974 void Assembler::popa() { // 64bit 6975 movq(r15, Address(rsp, 0)); 6976 movq(r14, Address(rsp, wordSize)); 6977 movq(r13, Address(rsp, 2 * wordSize)); 6978 movq(r12, Address(rsp, 3 * wordSize)); 6979 movq(r11, Address(rsp, 4 * wordSize)); 6980 movq(r10, Address(rsp, 5 * wordSize)); 6981 movq(r9, Address(rsp, 6 * wordSize)); 6982 movq(r8, Address(rsp, 7 * wordSize)); 6983 movq(rdi, Address(rsp, 8 * wordSize)); 6984 movq(rsi, Address(rsp, 9 * wordSize)); 6985 movq(rbp, Address(rsp, 10 * wordSize)); 6986 // skip rsp 6987 movq(rbx, Address(rsp, 12 * wordSize)); 6988 movq(rdx, Address(rsp, 13 * wordSize)); 6989 movq(rcx, Address(rsp, 14 * wordSize)); 6990 movq(rax, Address(rsp, 15 * wordSize)); 6991 6992 addq(rsp, 16 * wordSize); 6993 } 6994 6995 void Assembler::popcntq(Register dst, Address src) { 6996 assert(VM_Version::supports_popcnt(), "must support"); 6997 InstructionMark im(this); 6998 emit_int8((unsigned char)0xF3); 6999 prefixq(src, dst); 7000 emit_int8((unsigned char)0x0F); 7001 emit_int8((unsigned char)0xB8); 7002 emit_operand(dst, src); 7003 } 7004 7005 void Assembler::popcntq(Register dst, Register src) { 7006 assert(VM_Version::supports_popcnt(), "must support"); 7007 emit_int8((unsigned char)0xF3); 7008 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 7009 emit_int8((unsigned char)0x0F); 7010 emit_int8((unsigned char)0xB8); 7011 emit_int8((unsigned char)(0xC0 | encode)); 7012 } 7013 7014 void Assembler::popq(Address dst) { 7015 InstructionMark im(this); 7016 prefixq(dst); 7017 emit_int8((unsigned char)0x8F); 7018 emit_operand(rax, dst); 7019 } 7020 7021 void Assembler::pusha() { // 64bit 7022 // we have to store original rsp. ABI says that 128 bytes 7023 // below rsp are local scratch. 
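  // (Added note) The store at -5 * wordSize below writes the incoming rsp 40 bytes under
  // the current stack pointer (inside the 128-byte red zone); once 16 * wordSize has been
  // subtracted, that location is slot 11 of the new frame, exactly the "skip rsp" slot
  // that both this pusha() and popa() above leave alone.
  // Layout (word offsets from the new rsp):
  //   15:rax 14:rcx 13:rdx 12:rbx 11:saved rsp 10:rbp 9:rsi 8:rdi 7:r8 ... 1:r14 0:r15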
void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xC8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2,
                                     VEX_OPCODE_0F_3A, true, AVX_128bit, true, false);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64
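// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; the buffer name and sizes below are
// arbitrary assumptions, not part of this file).  The 64-bit emitters above
// can be driven through a CodeBuffer-backed Assembler, e.g.:
//
//   ResourceMark rm;
//   CodeBuffer cb("example_blob", 128, 0);
//   Assembler a(&cb);
//   a.movq(rax, rbx);      // REX.W 8B /r     -> 48 8b c3
//   a.shlq(rax, 3);        // REX.W C1 /4 ib  -> 48 c1 e0 03
//   a.subq(rsp, 16);       // REX.W 83 /5 ib  -> 48 83 ec 10 (8-bit form via emit_arith)
//   a.testq(rax, 0x100);   // REX.W A9 id     -> 48 a9 00 01 00 00 (rax short form)
//
// The emitted bytes land in the buffer's instruction section (cb.insts()).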