1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/assembler.inline.hpp" 28 #include "gc/shared/cardTableModRefBS.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "prims/methodHandles.hpp" 33 #include "runtime/biasedLocking.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/objectMonitor.hpp" 36 #include "runtime/os.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubRoutines.hpp" 39 #include "utilities/macros.hpp" 40 #if INCLUDE_ALL_GCS 41 #include "gc/g1/g1CollectedHeap.inline.hpp" 42 #include "gc/g1/g1SATBCardTableModRefBS.hpp" 43 #include "gc/g1/heapRegion.hpp" 44 #endif // INCLUDE_ALL_GCS 45 46 #ifdef PRODUCT 47 #define BLOCK_COMMENT(str) /* nothing */ 48 #define STOP(error) stop(error) 49 #else 50 #define BLOCK_COMMENT(str) block_comment(str) 51 #define STOP(error) block_comment(error); stop(error) 52 #endif 53 54 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 55 // Implementation of AddressLiteral 56 57 // A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms. 58 unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = { 59 // -----------------Table 4.5 -------------------- // 60 16, 32, 64, // EVEX_FV(0) 61 4, 4, 4, // EVEX_FV(1) - with Evex.b 62 16, 32, 64, // EVEX_FV(2) - with Evex.w 63 8, 8, 8, // EVEX_FV(3) - with Evex.w and Evex.b 64 8, 16, 32, // EVEX_HV(0) 65 4, 4, 4, // EVEX_HV(1) - with Evex.b 66 // -----------------Table 4.6 -------------------- // 67 16, 32, 64, // EVEX_FVM(0) 68 1, 1, 1, // EVEX_T1S(0) 69 2, 2, 2, // EVEX_T1S(1) 70 4, 4, 4, // EVEX_T1S(2) 71 8, 8, 8, // EVEX_T1S(3) 72 4, 4, 4, // EVEX_T1F(0) 73 8, 8, 8, // EVEX_T1F(1) 74 8, 8, 8, // EVEX_T2(0) 75 0, 16, 16, // EVEX_T2(1) 76 0, 16, 16, // EVEX_T4(0) 77 0, 0, 32, // EVEX_T4(1) 78 0, 0, 32, // EVEX_T8(0) 79 8, 16, 32, // EVEX_HVM(0) 80 4, 8, 16, // EVEX_QVM(0) 81 2, 4, 8, // EVEX_OVM(0) 82 16, 16, 16, // EVEX_M128(0) 83 8, 32, 64, // EVEX_DUP(0) 84 0, 0, 0 // EVEX_NTUP 85 }; 86 87 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { 88 _is_lval = false; 89 _target = target; 90 switch (rtype) { 91 case relocInfo::oop_type: 92 case relocInfo::metadata_type: 93 // Oops are a special case. 
Normally they would be their own section 94 // but in cases like icBuffer they are literals in the code stream that 95 // we don't have a section for. We use none so that we get a literal address 96 // which is always patchable. 97 break; 98 case relocInfo::external_word_type: 99 _rspec = external_word_Relocation::spec(target); 100 break; 101 case relocInfo::internal_word_type: 102 _rspec = internal_word_Relocation::spec(target); 103 break; 104 case relocInfo::opt_virtual_call_type: 105 _rspec = opt_virtual_call_Relocation::spec(); 106 break; 107 case relocInfo::static_call_type: 108 _rspec = static_call_Relocation::spec(); 109 break; 110 case relocInfo::runtime_call_type: 111 _rspec = runtime_call_Relocation::spec(); 112 break; 113 case relocInfo::poll_type: 114 case relocInfo::poll_return_type: 115 _rspec = Relocation::spec_simple(rtype); 116 break; 117 case relocInfo::none: 118 break; 119 default: 120 ShouldNotReachHere(); 121 break; 122 } 123 } 124 125 // Implementation of Address 126 127 #ifdef _LP64 128 129 Address Address::make_array(ArrayAddress adr) { 130 // Not implementable on 64bit machines 131 // Should have been handled higher up the call chain. 132 ShouldNotReachHere(); 133 return Address(); 134 } 135 136 // exceedingly dangerous constructor 137 Address::Address(int disp, address loc, relocInfo::relocType rtype) { 138 _base = noreg; 139 _index = noreg; 140 _scale = no_scale; 141 _disp = disp; 142 switch (rtype) { 143 case relocInfo::external_word_type: 144 _rspec = external_word_Relocation::spec(loc); 145 break; 146 case relocInfo::internal_word_type: 147 _rspec = internal_word_Relocation::spec(loc); 148 break; 149 case relocInfo::runtime_call_type: 150 // HMM 151 _rspec = runtime_call_Relocation::spec(); 152 break; 153 case relocInfo::poll_type: 154 case relocInfo::poll_return_type: 155 _rspec = Relocation::spec_simple(rtype); 156 break; 157 case relocInfo::none: 158 break; 159 default: 160 ShouldNotReachHere(); 161 } 162 } 163 #else // LP64 164 165 Address Address::make_array(ArrayAddress adr) { 166 AddressLiteral base = adr.base(); 167 Address index = adr.index(); 168 assert(index._disp == 0, "must not have disp"); // maybe it can? 169 Address array(index._base, index._index, index._scale, (intptr_t) base.target()); 170 array._rspec = base._rspec; 171 return array; 172 } 173 174 // exceedingly dangerous constructor 175 Address::Address(address loc, RelocationHolder spec) { 176 _base = noreg; 177 _index = noreg; 178 _scale = no_scale; 179 _disp = (intptr_t) loc; 180 _rspec = spec; 181 } 182 183 #endif // _LP64 184 185 186 187 // Convert the raw encoding form into the form expected by the constructor for 188 // Address. An index of 4 (rsp) corresponds to having no index, so convert 189 // that to noreg for the Address constructor. 
190 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) { 191 RelocationHolder rspec; 192 if (disp_reloc != relocInfo::none) { 193 rspec = Relocation::spec_simple(disp_reloc); 194 } 195 bool valid_index = index != rsp->encoding(); 196 if (valid_index) { 197 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); 198 madr._rspec = rspec; 199 return madr; 200 } else { 201 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); 202 madr._rspec = rspec; 203 return madr; 204 } 205 } 206 207 // Implementation of Assembler 208 209 int AbstractAssembler::code_fill_byte() { 210 return (u_char)'\xF4'; // hlt 211 } 212 213 // make this go away someday 214 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { 215 if (rtype == relocInfo::none) 216 emit_int32(data); 217 else 218 emit_data(data, Relocation::spec_simple(rtype), format); 219 } 220 221 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { 222 assert(imm_operand == 0, "default format must be immediate in this file"); 223 assert(inst_mark() != NULL, "must be inside InstructionMark"); 224 if (rspec.type() != relocInfo::none) { 225 #ifdef ASSERT 226 check_relocation(rspec, format); 227 #endif 228 // Do not use AbstractAssembler::relocate, which is not intended for 229 // embedded words. Instead, relocate to the enclosing instruction. 230 231 // hack. call32 is too wide for mask so use disp32 232 if (format == call32_operand) 233 code_section()->relocate(inst_mark(), rspec, disp32_operand); 234 else 235 code_section()->relocate(inst_mark(), rspec, format); 236 } 237 emit_int32(data); 238 } 239 240 static int encode(Register r) { 241 int enc = r->encoding(); 242 if (enc >= 8) { 243 enc -= 8; 244 } 245 return enc; 246 } 247 248 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { 249 assert(dst->has_byte_register(), "must have byte register"); 250 assert(isByte(op1) && isByte(op2), "wrong opcode"); 251 assert(isByte(imm8), "not a byte"); 252 assert((op1 & 0x01) == 0, "should be 8bit operation"); 253 emit_int8(op1); 254 emit_int8(op2 | encode(dst)); 255 emit_int8(imm8); 256 } 257 258 259 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { 260 assert(isByte(op1) && isByte(op2), "wrong opcode"); 261 assert((op1 & 0x01) == 1, "should be 32bit operation"); 262 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 263 if (is8bit(imm32)) { 264 emit_int8(op1 | 0x02); // set sign bit 265 emit_int8(op2 | encode(dst)); 266 emit_int8(imm32 & 0xFF); 267 } else { 268 emit_int8(op1); 269 emit_int8(op2 | encode(dst)); 270 emit_int32(imm32); 271 } 272 } 273 274 // Force generation of a 4 byte immediate value even if it fits into 8bit 275 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { 276 assert(isByte(op1) && isByte(op2), "wrong opcode"); 277 assert((op1 & 0x01) == 1, "should be 32bit operation"); 278 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 279 emit_int8(op1); 280 emit_int8(op2 | encode(dst)); 281 emit_int32(imm32); 282 } 283 284 // immediate-to-memory forms 285 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { 286 assert((op1 & 0x01) == 1, "should be 32bit operation"); 287 assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); 288 if (is8bit(imm32)) { 289 emit_int8(op1 | 0x02); // set sign bit 290 emit_operand(rm, adr, 1); 291 
emit_int8(imm32 & 0xFF); 292 } else { 293 emit_int8(op1); 294 emit_operand(rm, adr, 4); 295 emit_int32(imm32); 296 } 297 } 298 299 300 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { 301 assert(isByte(op1) && isByte(op2), "wrong opcode"); 302 emit_int8(op1); 303 emit_int8(op2 | encode(dst) << 3 | encode(src)); 304 } 305 306 307 bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, 308 int cur_tuple_type, int in_size_in_bits, int cur_encoding) { 309 int mod_idx = 0; 310 // We will test if the displacement fits the compressed format and if so 311 // apply the compression to the displacement iff the result is 8-bit. 312 if (VM_Version::supports_evex() && is_evex_inst) { 313 switch (cur_tuple_type) { 314 case EVEX_FV: 315 if ((cur_encoding & VEX_W) == VEX_W) { 316 mod_idx += 2 + (((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0); 317 } else { 318 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 319 } 320 break; 321 322 case EVEX_HV: 323 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 324 break; 325 326 case EVEX_FVM: 327 break; 328 329 case EVEX_T1S: 330 switch (in_size_in_bits) { 331 case EVEX_8bit: 332 break; 333 334 case EVEX_16bit: 335 mod_idx = 1; 336 break; 337 338 case EVEX_32bit: 339 mod_idx = 2; 340 break; 341 342 case EVEX_64bit: 343 mod_idx = 3; 344 break; 345 } 346 break; 347 348 case EVEX_T1F: 349 case EVEX_T2: 350 case EVEX_T4: 351 mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0; 352 break; 353 354 case EVEX_T8: 355 break; 356 357 case EVEX_HVM: 358 break; 359 360 case EVEX_QVM: 361 break; 362 363 case EVEX_OVM: 364 break; 365 366 case EVEX_M128: 367 break; 368 369 case EVEX_DUP: 370 break; 371 372 default: 373 assert(0, "no valid evex tuple_table entry"); 374 break; 375 } 376 377 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) { 378 int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len]; 379 if ((disp % disp_factor) == 0) { 380 int new_disp = disp / disp_factor; 381 if ((-0x80 <= new_disp && new_disp < 0x80)) { 382 disp = new_disp; 383 } 384 } else { 385 return false; 386 } 387 } 388 } 389 return (-0x80 <= disp && disp < 0x80); 390 } 391 392 393 bool Assembler::emit_compressed_disp_byte(int &disp) { 394 int mod_idx = 0; 395 // We will test if the displacement fits the compressed format and if so 396 // apply the compression to the displacement iff the result is 8-bit. 397 if (VM_Version::supports_evex() && is_evex_instruction) { 398 switch (tuple_type) { 399 case EVEX_FV: 400 if ((evex_encoding & VEX_W) == VEX_W) { 401 mod_idx += 2 + (((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0); 402 } else { 403 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 404 } 405 break; 406 407 case EVEX_HV: 408 mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0; 409 break; 410 411 case EVEX_FVM: 412 break; 413 414 case EVEX_T1S: 415 switch (input_size_in_bits) { 416 case EVEX_8bit: 417 break; 418 419 case EVEX_16bit: 420 mod_idx = 1; 421 break; 422 423 case EVEX_32bit: 424 mod_idx = 2; 425 break; 426 427 case EVEX_64bit: 428 mod_idx = 3; 429 break; 430 } 431 break; 432 433 case EVEX_T1F: 434 case EVEX_T2: 435 case EVEX_T4: 436 mod_idx = (input_size_in_bits == EVEX_64bit) ?
1 : 0; 437 break; 438 439 case EVEX_T8: 440 break; 441 442 case EVEX_HVM: 443 break; 444 445 case EVEX_QVM: 446 break; 447 448 case EVEX_OVM: 449 break; 450 451 case EVEX_M128: 452 break; 453 454 case EVEX_DUP: 455 break; 456 457 default: 458 assert(0, "no valid evex tuple_table entry"); 459 break; 460 } 461 462 if (avx_vector_len >= AVX_128bit && avx_vector_len <= AVX_512bit) { 463 int disp_factor = tuple_table[tuple_type + mod_idx][avx_vector_len]; 464 if ((disp % disp_factor) == 0) { 465 int new_disp = disp / disp_factor; 466 if (is8bit(new_disp)) { 467 disp = new_disp; 468 } 469 } else { 470 return false; 471 } 472 } 473 } 474 return is8bit(disp); 475 } 476 477 478 void Assembler::emit_operand(Register reg, Register base, Register index, 479 Address::ScaleFactor scale, int disp, 480 RelocationHolder const& rspec, 481 int rip_relative_correction) { 482 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); 483 484 // Encode the registers as needed in the fields they are used in 485 486 int regenc = encode(reg) << 3; 487 int indexenc = index->is_valid() ? encode(index) << 3 : 0; 488 int baseenc = base->is_valid() ? encode(base) : 0; 489 490 if (base->is_valid()) { 491 if (index->is_valid()) { 492 assert(scale != Address::no_scale, "inconsistent address"); 493 // [base + index*scale + disp] 494 if (disp == 0 && rtype == relocInfo::none && 495 base != rbp LP64_ONLY(&& base != r13)) { 496 // [base + index*scale] 497 // [00 reg 100][ss index base] 498 assert(index != rsp, "illegal addressing mode"); 499 emit_int8(0x04 | regenc); 500 emit_int8(scale << 6 | indexenc | baseenc); 501 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 502 // [base + index*scale + imm8] 503 // [01 reg 100][ss index base] imm8 504 assert(index != rsp, "illegal addressing mode"); 505 emit_int8(0x44 | regenc); 506 emit_int8(scale << 6 | indexenc | baseenc); 507 emit_int8(disp & 0xFF); 508 } else { 509 // [base + index*scale + disp32] 510 // [10 reg 100][ss index base] disp32 511 assert(index != rsp, "illegal addressing mode"); 512 emit_int8(0x84 | regenc); 513 emit_int8(scale << 6 | indexenc | baseenc); 514 emit_data(disp, rspec, disp32_operand); 515 } 516 } else if (base == rsp LP64_ONLY(|| base == r12)) { 517 // [rsp + disp] 518 if (disp == 0 && rtype == relocInfo::none) { 519 // [rsp] 520 // [00 reg 100][00 100 100] 521 emit_int8(0x04 | regenc); 522 emit_int8(0x24); 523 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 524 // [rsp + imm8] 525 // [01 reg 100][00 100 100] disp8 526 emit_int8(0x44 | regenc); 527 emit_int8(0x24); 528 emit_int8(disp & 0xFF); 529 } else { 530 // [rsp + imm32] 531 // [10 reg 100][00 100 100] disp32 532 emit_int8(0x84 | regenc); 533 emit_int8(0x24); 534 emit_data(disp, rspec, disp32_operand); 535 } 536 } else { 537 // [base + disp] 538 assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); 539 if (disp == 0 && rtype == relocInfo::none && 540 base != rbp LP64_ONLY(&& base != r13)) { 541 // [base] 542 // [00 reg base] 543 emit_int8(0x00 | regenc | baseenc); 544 } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { 545 // [base + disp8] 546 // [01 reg base] disp8 547 emit_int8(0x40 | regenc | baseenc); 548 emit_int8(disp & 0xFF); 549 } else { 550 // [base + disp32] 551 // [10 reg base] disp32 552 emit_int8(0x80 | regenc | baseenc); 553 emit_data(disp, rspec, disp32_operand); 554 } 555 } 556 } else { 557 if (index->is_valid()) { 558 assert(scale != Address::no_scale, "inconsistent address"); 559 
// [index*scale + disp] 560 // [00 reg 100][ss index 101] disp32 561 assert(index != rsp, "illegal addressing mode"); 562 emit_int8(0x04 | regenc); 563 emit_int8(scale << 6 | indexenc | 0x05); 564 emit_data(disp, rspec, disp32_operand); 565 } else if (rtype != relocInfo::none ) { 566 // [disp] (64bit) RIP-RELATIVE (32bit) abs 567 // [00 000 101] disp32 568 569 emit_int8(0x05 | regenc); 570 // Note that the RIP-rel. correction applies to the generated 571 // disp field, but _not_ to the target address in the rspec. 572 573 // disp was created by converting the target address minus the pc 574 // at the start of the instruction. That needs more correction here. 575 // intptr_t disp = target - next_ip; 576 assert(inst_mark() != NULL, "must be inside InstructionMark"); 577 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; 578 int64_t adjusted = disp; 579 // Do rip-rel adjustment for 64bit 580 LP64_ONLY(adjusted -= (next_ip - inst_mark())); 581 assert(is_simm32(adjusted), 582 "must be 32bit offset (RIP relative address)"); 583 emit_data((int32_t) adjusted, rspec, disp32_operand); 584 585 } else { 586 // 32bit never did this, did everything as the rip-rel/disp code above 587 // [disp] ABSOLUTE 588 // [00 reg 100][00 100 101] disp32 589 emit_int8(0x04 | regenc); 590 emit_int8(0x25); 591 emit_data(disp, rspec, disp32_operand); 592 } 593 } 594 is_evex_instruction = false; 595 } 596 597 void Assembler::emit_operand(XMMRegister reg, Register base, Register index, 598 Address::ScaleFactor scale, int disp, 599 RelocationHolder const& rspec) { 600 if (UseAVX > 2) { 601 int xreg_enc = reg->encoding(); 602 if (xreg_enc > 15) { 603 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf); 604 emit_operand((Register)new_reg, base, index, scale, disp, rspec); 605 return; 606 } 607 } 608 emit_operand((Register)reg, base, index, scale, disp, rspec); 609 } 610 611 // Secret local extension to Assembler::WhichOperand: 612 #define end_pc_operand (_WhichOperand_limit) 613 614 address Assembler::locate_operand(address inst, WhichOperand which) { 615 // Decode the given instruction, and return the address of 616 // an embedded 32-bit operand word. 617 618 // If "which" is disp32_operand, selects the displacement portion 619 // of an effective address specifier. 620 // If "which" is imm64_operand, selects the trailing immediate constant. 621 // If "which" is call32_operand, selects the displacement of a call or jump. 622 // Caller is responsible for ensuring that there is such an operand, 623 // and that it is 32/64 bits wide. 624 625 // If "which" is end_pc_operand, find the end of the instruction. 626 627 address ip = inst; 628 bool is_64bit = false; 629 630 debug_only(bool has_disp32 = false); 631 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn 632 633 again_after_prefix: 634 switch (0xFF & *ip++) { 635 636 // These convenience macros generate groups of "case" labels for the switch. 
637 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 638 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ 639 case (x)+4: case (x)+5: case (x)+6: case (x)+7 640 #define REP16(x) REP8((x)+0): \ 641 case REP8((x)+8) 642 643 case CS_segment: 644 case SS_segment: 645 case DS_segment: 646 case ES_segment: 647 case FS_segment: 648 case GS_segment: 649 // Seems dubious 650 LP64_ONLY(assert(false, "shouldn't have that prefix")); 651 assert(ip == inst+1, "only one prefix allowed"); 652 goto again_after_prefix; 653 654 case 0x67: 655 case REX: 656 case REX_B: 657 case REX_X: 658 case REX_XB: 659 case REX_R: 660 case REX_RB: 661 case REX_RX: 662 case REX_RXB: 663 NOT_LP64(assert(false, "64bit prefixes")); 664 goto again_after_prefix; 665 666 case REX_W: 667 case REX_WB: 668 case REX_WX: 669 case REX_WXB: 670 case REX_WR: 671 case REX_WRB: 672 case REX_WRX: 673 case REX_WRXB: 674 NOT_LP64(assert(false, "64bit prefixes")); 675 is_64bit = true; 676 goto again_after_prefix; 677 678 case 0xFF: // pushq a; decl a; incl a; call a; jmp a 679 case 0x88: // movb a, r 680 case 0x89: // movl a, r 681 case 0x8A: // movb r, a 682 case 0x8B: // movl r, a 683 case 0x8F: // popl a 684 debug_only(has_disp32 = true); 685 break; 686 687 case 0x68: // pushq #32 688 if (which == end_pc_operand) { 689 return ip + 4; 690 } 691 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate"); 692 return ip; // not produced by emit_operand 693 694 case 0x66: // movw ... (size prefix) 695 again_after_size_prefix2: 696 switch (0xFF & *ip++) { 697 case REX: 698 case REX_B: 699 case REX_X: 700 case REX_XB: 701 case REX_R: 702 case REX_RB: 703 case REX_RX: 704 case REX_RXB: 705 case REX_W: 706 case REX_WB: 707 case REX_WX: 708 case REX_WXB: 709 case REX_WR: 710 case REX_WRB: 711 case REX_WRX: 712 case REX_WRXB: 713 NOT_LP64(assert(false, "64bit prefix found")); 714 goto again_after_size_prefix2; 715 case 0x8B: // movw r, a 716 case 0x89: // movw a, r 717 debug_only(has_disp32 = true); 718 break; 719 case 0xC7: // movw a, #16 720 debug_only(has_disp32 = true); 721 tail_size = 2; // the imm16 722 break; 723 case 0x0F: // several SSE/SSE2 variants 724 ip--; // reparse the 0x0F 725 goto again_after_prefix; 726 default: 727 ShouldNotReachHere(); 728 } 729 break; 730 731 case REP8(0xB8): // movl/q r, #32/#64(oop?) 732 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); 733 // these asserts are somewhat nonsensical 734 #ifndef _LP64 735 assert(which == imm_operand || which == disp32_operand, 736 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip))); 737 #else 738 assert((which == call32_operand || which == imm_operand) && is_64bit || 739 which == narrow_oop_operand && !is_64bit, 740 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip))); 741 #endif // _LP64 742 return ip; 743 744 case 0x69: // imul r, a, #32 745 case 0xC7: // movl a, #32(oop?) 746 tail_size = 4; 747 debug_only(has_disp32 = true); // has both kinds of operands! 748 break; 749 750 case 0x0F: // movx..., etc. 751 switch (0xFF & *ip++) { 752 case 0x3A: // pcmpestri 753 tail_size = 1; 754 case 0x38: // ptest, pmovzxbw 755 ip++; // skip opcode 756 debug_only(has_disp32 = true); // has both kinds of operands! 757 break; 758 759 case 0x70: // pshufd r, r/a, #8 760 debug_only(has_disp32 = true); // has both kinds of operands! 
761 case 0x73: // psrldq r, #8 762 tail_size = 1; 763 break; 764 765 case 0x12: // movlps 766 case 0x28: // movaps 767 case 0x2E: // ucomiss 768 case 0x2F: // comiss 769 case 0x54: // andps 770 case 0x55: // andnps 771 case 0x56: // orps 772 case 0x57: // xorps 773 case 0x6E: // movd 774 case 0x7E: // movd 775 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush 776 debug_only(has_disp32 = true); 777 break; 778 779 case 0xAD: // shrd r, a, %cl 780 case 0xAF: // imul r, a 781 case 0xBE: // movsbl r, a (movsxb) 782 case 0xBF: // movswl r, a (movsxw) 783 case 0xB6: // movzbl r, a (movzxb) 784 case 0xB7: // movzwl r, a (movzxw) 785 case REP16(0x40): // cmovl cc, r, a 786 case 0xB0: // cmpxchgb 787 case 0xB1: // cmpxchg 788 case 0xC1: // xaddl 789 case 0xC7: // cmpxchg8 790 case REP16(0x90): // setcc a 791 debug_only(has_disp32 = true); 792 // fall out of the switch to decode the address 793 break; 794 795 case 0xC4: // pinsrw r, a, #8 796 debug_only(has_disp32 = true); 797 case 0xC5: // pextrw r, r, #8 798 tail_size = 1; // the imm8 799 break; 800 801 case 0xAC: // shrd r, a, #8 802 debug_only(has_disp32 = true); 803 tail_size = 1; // the imm8 804 break; 805 806 case REP16(0x80): // jcc rdisp32 807 if (which == end_pc_operand) return ip + 4; 808 assert(which == call32_operand, "jcc has no disp32 or imm"); 809 return ip; 810 default: 811 ShouldNotReachHere(); 812 } 813 break; 814 815 case 0x81: // addl a, #32; addl r, #32 816 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 817 // on 32bit in the case of cmpl, the imm might be an oop 818 tail_size = 4; 819 debug_only(has_disp32 = true); // has both kinds of operands! 820 break; 821 822 case 0x83: // addl a, #8; addl r, #8 823 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl 824 debug_only(has_disp32 = true); // has both kinds of operands! 825 tail_size = 1; 826 break; 827 828 case 0x9B: 829 switch (0xFF & *ip++) { 830 case 0xD9: // fnstcw a 831 debug_only(has_disp32 = true); 832 break; 833 default: 834 ShouldNotReachHere(); 835 } 836 break; 837 838 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a 839 case REP4(0x10): // adc... 840 case REP4(0x20): // and... 841 case REP4(0x30): // xor... 842 case REP4(0x08): // or... 843 case REP4(0x18): // sbb... 844 case REP4(0x28): // sub... 845 case 0xF7: // mull a 846 case 0x8D: // lea r, a 847 case 0x87: // xchg r, a 848 case REP4(0x38): // cmp... 849 case 0x85: // test r, a 850 debug_only(has_disp32 = true); // has both kinds of operands! 851 break; 852 853 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 854 case 0xC6: // movb a, #8 855 case 0x80: // cmpb a, #8 856 case 0x6B: // imul r, a, #8 857 debug_only(has_disp32 = true); // has both kinds of operands! 858 tail_size = 1; // the imm8 859 break; 860 861 case 0xC4: // VEX_3bytes 862 case 0xC5: // VEX_2bytes 863 assert((UseAVX > 0), "shouldn't have VEX prefix"); 864 assert(ip == inst+1, "no prefixes allowed"); 865 // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions 866 // but they have prefix 0x0F and processed when 0x0F processed above. 867 // 868 // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES 869 // instructions (these instructions are not supported in 64-bit mode). 870 // To distinguish them bits [7:6] are set in the VEX second byte since 871 // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set 872 // those VEX bits REX and vvvv bits are inverted. 
873 // 874 // Fortunately C2 doesn't generate these instructions so we don't need 875 // to check for them in product version. 876 877 // Check second byte 878 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions")); 879 880 // First byte 881 if ((0xFF & *inst) == VEX_3bytes) { 882 ip++; // third byte 883 is_64bit = ((VEX_W & *ip) == VEX_W); 884 } 885 ip++; // opcode 886 // To find the end of instruction (which == end_pc_operand). 887 switch (0xFF & *ip) { 888 case 0x61: // pcmpestri r, r/a, #8 889 case 0x70: // pshufd r, r/a, #8 890 case 0x73: // psrldq r, #8 891 tail_size = 1; // the imm8 892 break; 893 default: 894 break; 895 } 896 ip++; // skip opcode 897 debug_only(has_disp32 = true); // has both kinds of operands! 898 break; 899 900 case 0x62: // EVEX_4bytes 901 assert((UseAVX > 0), "shouldn't have EVEX prefix"); 902 assert(ip == inst+1, "no prefixes allowed"); 903 // no EVEX collisions, all instructions that have 0x62 opcodes 904 // have EVEX versions and are subopcodes of 0x66 905 ip++; // skip P0 and examine W in P1 906 is_64bit = ((VEX_W & *ip) == VEX_W); 907 ip++; // move to P2 908 ip++; // skip P2, move to opcode 909 // To find the end of instruction (which == end_pc_operand). 910 switch (0xFF & *ip) { 911 case 0x61: // pcmpestri r, r/a, #8 912 case 0x70: // pshufd r, r/a, #8 913 case 0x73: // psrldq r, #8 914 tail_size = 1; // the imm8 915 break; 916 default: 917 break; 918 } 919 ip++; // skip opcode 920 debug_only(has_disp32 = true); // has both kinds of operands! 921 break; 922 923 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 924 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl 925 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a 926 case 0xDD: // fld_d a; fst_d a; fstp_d a 927 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a 928 case 0xDF: // fild_d a; fistp_d a 929 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a 930 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a 931 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a 932 debug_only(has_disp32 = true); 933 break; 934 935 case 0xE8: // call rdisp32 936 case 0xE9: // jmp rdisp32 937 if (which == end_pc_operand) return ip + 4; 938 assert(which == call32_operand, "call has no disp32 or imm"); 939 return ip; 940 941 case 0xF0: // Lock 942 assert(os::is_MP(), "only on MP"); 943 goto again_after_prefix; 944 945 case 0xF3: // For SSE 946 case 0xF2: // For SSE2 947 switch (0xFF & *ip++) { 948 case REX: 949 case REX_B: 950 case REX_X: 951 case REX_XB: 952 case REX_R: 953 case REX_RB: 954 case REX_RX: 955 case REX_RXB: 956 case REX_W: 957 case REX_WB: 958 case REX_WX: 959 case REX_WXB: 960 case REX_WR: 961 case REX_WRB: 962 case REX_WRX: 963 case REX_WRXB: 964 NOT_LP64(assert(false, "found 64bit prefix")); 965 ip++; 966 default: 967 ip++; 968 } 969 debug_only(has_disp32 = true); // has both kinds of operands!
970 break; 971 972 default: 973 ShouldNotReachHere(); 974 975 #undef REP8 976 #undef REP16 977 } 978 979 assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); 980 #ifdef _LP64 981 assert(which != imm_operand, "instruction is not a movq reg, imm64"); 982 #else 983 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field"); 984 assert(which != imm_operand || has_disp32, "instruction has no imm32 field"); 985 #endif // LP64 986 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); 987 988 // parse the output of emit_operand 989 int op2 = 0xFF & *ip++; 990 int base = op2 & 0x07; 991 int op3 = -1; 992 const int b100 = 4; 993 const int b101 = 5; 994 if (base == b100 && (op2 >> 6) != 3) { 995 op3 = 0xFF & *ip++; 996 base = op3 & 0x07; // refetch the base 997 } 998 // now ip points at the disp (if any) 999 1000 switch (op2 >> 6) { 1001 case 0: 1002 // [00 reg 100][ss index base] 1003 // [00 reg 100][00 100 esp] 1004 // [00 reg base] 1005 // [00 reg 100][ss index 101][disp32] 1006 // [00 reg 101] [disp32] 1007 1008 if (base == b101) { 1009 if (which == disp32_operand) 1010 return ip; // caller wants the disp32 1011 ip += 4; // skip the disp32 1012 } 1013 break; 1014 1015 case 1: 1016 // [01 reg 100][ss index base][disp8] 1017 // [01 reg 100][00 100 esp][disp8] 1018 // [01 reg base] [disp8] 1019 ip += 1; // skip the disp8 1020 break; 1021 1022 case 2: 1023 // [10 reg 100][ss index base][disp32] 1024 // [10 reg 100][00 100 esp][disp32] 1025 // [10 reg base] [disp32] 1026 if (which == disp32_operand) 1027 return ip; // caller wants the disp32 1028 ip += 4; // skip the disp32 1029 break; 1030 1031 case 3: 1032 // [11 reg base] (not a memory addressing mode) 1033 break; 1034 } 1035 1036 if (which == end_pc_operand) { 1037 return ip + tail_size; 1038 } 1039 1040 #ifdef _LP64 1041 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32"); 1042 #else 1043 assert(which == imm_operand, "instruction has only an imm field"); 1044 #endif // LP64 1045 return ip; 1046 } 1047 1048 address Assembler::locate_next_instruction(address inst) { 1049 // Secretly share code with locate_operand: 1050 return locate_operand(inst, end_pc_operand); 1051 } 1052 1053 1054 #ifdef ASSERT 1055 void Assembler::check_relocation(RelocationHolder const& rspec, int format) { 1056 address inst = inst_mark(); 1057 assert(inst != NULL && inst < pc(), "must point to beginning of instruction"); 1058 address opnd; 1059 1060 Relocation* r = rspec.reloc(); 1061 if (r->type() == relocInfo::none) { 1062 return; 1063 } else if (r->is_call() || format == call32_operand) { 1064 // assert(format == imm32_operand, "cannot specify a nonzero format"); 1065 opnd = locate_operand(inst, call32_operand); 1066 } else if (r->is_data()) { 1067 assert(format == imm_operand || format == disp32_operand 1068 LP64_ONLY(|| format == narrow_oop_operand), "format ok"); 1069 opnd = locate_operand(inst, (WhichOperand)format); 1070 } else { 1071 assert(format == imm_operand, "cannot specify a format"); 1072 return; 1073 } 1074 assert(opnd == pc(), "must put operand where relocs can find it"); 1075 } 1076 #endif // ASSERT 1077 1078 void Assembler::emit_operand32(Register reg, Address adr) { 1079 assert(reg->encoding() < 8, "no extended registers"); 1080 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1081 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1082 adr._rspec); 1083 } 1084 1085 void Assembler::emit_operand(Register 
reg, Address adr, 1086 int rip_relative_correction) { 1087 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1088 adr._rspec, 1089 rip_relative_correction); 1090 } 1091 1092 void Assembler::emit_operand(XMMRegister reg, Address adr) { 1093 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, 1094 adr._rspec); 1095 } 1096 1097 // MMX operations 1098 void Assembler::emit_operand(MMXRegister reg, Address adr) { 1099 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1100 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); 1101 } 1102 1103 // work around gcc (3.2.1-7a) bug 1104 void Assembler::emit_operand(Address adr, MMXRegister reg) { 1105 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); 1106 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); 1107 } 1108 1109 1110 void Assembler::emit_farith(int b1, int b2, int i) { 1111 assert(isByte(b1) && isByte(b2), "wrong opcode"); 1112 assert(0 <= i && i < 8, "illegal stack offset"); 1113 emit_int8(b1); 1114 emit_int8(b2 + i); 1115 } 1116 1117 1118 // Now the Assembler instructions (identical for 32/64 bits) 1119 1120 void Assembler::adcl(Address dst, int32_t imm32) { 1121 InstructionMark im(this); 1122 prefix(dst); 1123 emit_arith_operand(0x81, rdx, dst, imm32); 1124 } 1125 1126 void Assembler::adcl(Address dst, Register src) { 1127 InstructionMark im(this); 1128 prefix(dst, src); 1129 emit_int8(0x11); 1130 emit_operand(src, dst); 1131 } 1132 1133 void Assembler::adcl(Register dst, int32_t imm32) { 1134 prefix(dst); 1135 emit_arith(0x81, 0xD0, dst, imm32); 1136 } 1137 1138 void Assembler::adcl(Register dst, Address src) { 1139 InstructionMark im(this); 1140 prefix(src, dst); 1141 emit_int8(0x13); 1142 emit_operand(dst, src); 1143 } 1144 1145 void Assembler::adcl(Register dst, Register src) { 1146 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1147 emit_arith(0x13, 0xC0, dst, src); 1148 } 1149 1150 void Assembler::addl(Address dst, int32_t imm32) { 1151 InstructionMark im(this); 1152 prefix(dst); 1153 emit_arith_operand(0x81, rax, dst, imm32); 1154 } 1155 1156 void Assembler::addl(Address dst, Register src) { 1157 InstructionMark im(this); 1158 prefix(dst, src); 1159 emit_int8(0x01); 1160 emit_operand(src, dst); 1161 } 1162 1163 void Assembler::addl(Register dst, int32_t imm32) { 1164 prefix(dst); 1165 emit_arith(0x81, 0xC0, dst, imm32); 1166 } 1167 1168 void Assembler::addl(Register dst, Address src) { 1169 InstructionMark im(this); 1170 prefix(src, dst); 1171 emit_int8(0x03); 1172 emit_operand(dst, src); 1173 } 1174 1175 void Assembler::addl(Register dst, Register src) { 1176 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1177 emit_arith(0x03, 0xC0, dst, src); 1178 } 1179 1180 void Assembler::addr_nop_4() { 1181 assert(UseAddressNop, "no CPU support"); 1182 // 4 bytes: NOP DWORD PTR [EAX+0] 1183 emit_int8(0x0F); 1184 emit_int8(0x1F); 1185 emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); 1186 emit_int8(0); // 8-bits offset (1 byte) 1187 } 1188 1189 void Assembler::addr_nop_5() { 1190 assert(UseAddressNop, "no CPU support"); 1191 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset 1192 emit_int8(0x0F); 1193 emit_int8(0x1F); 1194 emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); 1195 emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1196 emit_int8(0); // 8-bits offset (1 byte) 1197 } 1198 1199 void Assembler::addr_nop_7() { 1200 
assert(UseAddressNop, "no CPU support"); 1201 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset 1202 emit_int8(0x0F); 1203 emit_int8(0x1F); 1204 emit_int8((unsigned char)0x80); 1205 // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); 1206 emit_int32(0); // 32-bits offset (4 bytes) 1207 } 1208 1209 void Assembler::addr_nop_8() { 1210 assert(UseAddressNop, "no CPU support"); 1211 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset 1212 emit_int8(0x0F); 1213 emit_int8(0x1F); 1214 emit_int8((unsigned char)0x84); 1215 // emit_rm(cbuf, 0x2, EAX_enc, 0x4); 1216 emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); 1217 emit_int32(0); // 32-bits offset (4 bytes) 1218 } 1219 1220 void Assembler::addsd(XMMRegister dst, XMMRegister src) { 1221 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1222 if (VM_Version::supports_evex()) { 1223 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2); 1224 } else { 1225 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2); 1226 } 1227 } 1228 1229 void Assembler::addsd(XMMRegister dst, Address src) { 1230 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1231 if (VM_Version::supports_evex()) { 1232 tuple_type = EVEX_T1S; 1233 input_size_in_bits = EVEX_64bit; 1234 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2); 1235 } else { 1236 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2); 1237 } 1238 } 1239 1240 void Assembler::addss(XMMRegister dst, XMMRegister src) { 1241 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1242 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3); 1243 } 1244 1245 void Assembler::addss(XMMRegister dst, Address src) { 1246 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1247 if (VM_Version::supports_evex()) { 1248 tuple_type = EVEX_T1S; 1249 input_size_in_bits = EVEX_32bit; 1250 } 1251 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3); 1252 } 1253 1254 void Assembler::aesdec(XMMRegister dst, Address src) { 1255 assert(VM_Version::supports_aes(), ""); 1256 InstructionMark im(this); 1257 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1258 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1259 emit_int8((unsigned char)0xDE); 1260 emit_operand(dst, src); 1261 } 1262 1263 void Assembler::aesdec(XMMRegister dst, XMMRegister src) { 1264 assert(VM_Version::supports_aes(), ""); 1265 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1266 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1267 emit_int8((unsigned char)0xDE); 1268 emit_int8(0xC0 | encode); 1269 } 1270 1271 void Assembler::aesdeclast(XMMRegister dst, Address src) { 1272 assert(VM_Version::supports_aes(), ""); 1273 InstructionMark im(this); 1274 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1275 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1276 emit_int8((unsigned char)0xDF); 1277 emit_operand(dst, src); 1278 } 1279 1280 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { 1281 assert(VM_Version::supports_aes(), ""); 1282 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1283 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1284 emit_int8((unsigned char)0xDF); 1285 emit_int8((unsigned char)(0xC0 | encode)); 1286 } 1287 1288 void Assembler::aesenc(XMMRegister dst, Address src) { 1289 assert(VM_Version::supports_aes(), ""); 1290 InstructionMark im(this); 1291 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1292 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1293 emit_int8((unsigned char)0xDC); 1294 emit_operand(dst, src); 1295 } 1296 1297 void Assembler::aesenc(XMMRegister dst, XMMRegister src) { 1298 assert(VM_Version::supports_aes(), ""); 1299 int encode = simd_prefix_and_encode(dst, dst, 
src, VEX_SIMD_66, false, 1300 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1301 emit_int8((unsigned char)0xDC); 1302 emit_int8(0xC0 | encode); 1303 } 1304 1305 void Assembler::aesenclast(XMMRegister dst, Address src) { 1306 assert(VM_Version::supports_aes(), ""); 1307 InstructionMark im(this); 1308 simd_prefix(dst, dst, src, VEX_SIMD_66, false, 1309 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1310 emit_int8((unsigned char)0xDD); 1311 emit_operand(dst, src); 1312 } 1313 1314 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { 1315 assert(VM_Version::supports_aes(), ""); 1316 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 1317 VEX_OPCODE_0F_38, false, AVX_128bit, true); 1318 emit_int8((unsigned char)0xDD); 1319 emit_int8((unsigned char)(0xC0 | encode)); 1320 } 1321 1322 1323 void Assembler::andl(Address dst, int32_t imm32) { 1324 InstructionMark im(this); 1325 prefix(dst); 1326 emit_int8((unsigned char)0x81); 1327 emit_operand(rsp, dst, 4); 1328 emit_int32(imm32); 1329 } 1330 1331 void Assembler::andl(Register dst, int32_t imm32) { 1332 prefix(dst); 1333 emit_arith(0x81, 0xE0, dst, imm32); 1334 } 1335 1336 void Assembler::andl(Register dst, Address src) { 1337 InstructionMark im(this); 1338 prefix(src, dst); 1339 emit_int8(0x23); 1340 emit_operand(dst, src); 1341 } 1342 1343 void Assembler::andl(Register dst, Register src) { 1344 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1345 emit_arith(0x23, 0xC0, dst, src); 1346 } 1347 1348 void Assembler::andnl(Register dst, Register src1, Register src2) { 1349 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1350 int encode = vex_prefix_0F38_and_encode_legacy(dst, src1, src2, false); 1351 emit_int8((unsigned char)0xF2); 1352 emit_int8((unsigned char)(0xC0 | encode)); 1353 } 1354 1355 void Assembler::andnl(Register dst, Register src1, Address src2) { 1356 InstructionMark im(this); 1357 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1358 vex_prefix_0F38_legacy(dst, src1, src2, false); 1359 emit_int8((unsigned char)0xF2); 1360 emit_operand(dst, src2); 1361 } 1362 1363 void Assembler::bsfl(Register dst, Register src) { 1364 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1365 emit_int8(0x0F); 1366 emit_int8((unsigned char)0xBC); 1367 emit_int8((unsigned char)(0xC0 | encode)); 1368 } 1369 1370 void Assembler::bsrl(Register dst, Register src) { 1371 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1372 emit_int8(0x0F); 1373 emit_int8((unsigned char)0xBD); 1374 emit_int8((unsigned char)(0xC0 | encode)); 1375 } 1376 1377 void Assembler::bswapl(Register reg) { // bswap 1378 int encode = prefix_and_encode(reg->encoding()); 1379 emit_int8(0x0F); 1380 emit_int8((unsigned char)(0xC8 | encode)); 1381 } 1382 1383 void Assembler::blsil(Register dst, Register src) { 1384 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1385 int encode = vex_prefix_0F38_and_encode_legacy(rbx, dst, src, false); 1386 emit_int8((unsigned char)0xF3); 1387 emit_int8((unsigned char)(0xC0 | encode)); 1388 } 1389 1390 void Assembler::blsil(Register dst, Address src) { 1391 InstructionMark im(this); 1392 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1393 vex_prefix_0F38_legacy(rbx, dst, src, false); 1394 emit_int8((unsigned char)0xF3); 1395 emit_operand(rbx, src); 1396 } 1397 1398 void Assembler::blsmskl(Register dst, Register src) { 1399 
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1400 int encode = vex_prefix_0F38_and_encode_legacy(rdx, dst, src, false); 1401 emit_int8((unsigned char)0xF3); 1402 emit_int8((unsigned char)(0xC0 | encode)); 1403 } 1404 1405 void Assembler::blsmskl(Register dst, Address src) { 1406 InstructionMark im(this); 1407 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1408 vex_prefix_0F38(rdx, dst, src, false); 1409 emit_int8((unsigned char)0xF3); 1410 emit_operand(rdx, src); 1411 } 1412 1413 void Assembler::blsrl(Register dst, Register src) { 1414 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1415 int encode = vex_prefix_0F38_and_encode_legacy(rcx, dst, src, false); 1416 emit_int8((unsigned char)0xF3); 1417 emit_int8((unsigned char)(0xC0 | encode)); 1418 } 1419 1420 void Assembler::blsrl(Register dst, Address src) { 1421 InstructionMark im(this); 1422 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 1423 vex_prefix_0F38_legacy(rcx, dst, src, false); 1424 emit_int8((unsigned char)0xF3); 1425 emit_operand(rcx, src); 1426 } 1427 1428 void Assembler::call(Label& L, relocInfo::relocType rtype) { 1429 // suspect disp32 is always good 1430 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand); 1431 1432 if (L.is_bound()) { 1433 const int long_size = 5; 1434 int offs = (int)( target(L) - pc() ); 1435 assert(offs <= 0, "assembler error"); 1436 InstructionMark im(this); 1437 // 1110 1000 #32-bit disp 1438 emit_int8((unsigned char)0xE8); 1439 emit_data(offs - long_size, rtype, operand); 1440 } else { 1441 InstructionMark im(this); 1442 // 1110 1000 #32-bit disp 1443 L.add_patch_at(code(), locator()); 1444 1445 emit_int8((unsigned char)0xE8); 1446 emit_data(int(0), rtype, operand); 1447 } 1448 } 1449 1450 void Assembler::call(Register dst) { 1451 int encode = prefix_and_encode(dst->encoding()); 1452 emit_int8((unsigned char)0xFF); 1453 emit_int8((unsigned char)(0xD0 | encode)); 1454 } 1455 1456 1457 void Assembler::call(Address adr) { 1458 InstructionMark im(this); 1459 prefix(adr); 1460 emit_int8((unsigned char)0xFF); 1461 emit_operand(rdx, adr); 1462 } 1463 1464 void Assembler::call_literal(address entry, RelocationHolder const& rspec) { 1465 assert(entry != NULL, "call most probably wrong"); 1466 InstructionMark im(this); 1467 emit_int8((unsigned char)0xE8); 1468 intptr_t disp = entry - (pc() + sizeof(int32_t)); 1469 assert(is_simm32(disp), "must be 32bit offset (call2)"); 1470 // Technically, should use call32_operand, but this format is 1471 // implied by the fact that we're emitting a call instruction. 
1472 1473 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand); 1474 emit_data((int) disp, rspec, operand); 1475 } 1476 1477 void Assembler::cdql() { 1478 emit_int8((unsigned char)0x99); 1479 } 1480 1481 void Assembler::cld() { 1482 emit_int8((unsigned char)0xFC); 1483 } 1484 1485 void Assembler::cmovl(Condition cc, Register dst, Register src) { 1486 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1487 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1488 emit_int8(0x0F); 1489 emit_int8(0x40 | cc); 1490 emit_int8((unsigned char)(0xC0 | encode)); 1491 } 1492 1493 1494 void Assembler::cmovl(Condition cc, Register dst, Address src) { 1495 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); 1496 prefix(src, dst); 1497 emit_int8(0x0F); 1498 emit_int8(0x40 | cc); 1499 emit_operand(dst, src); 1500 } 1501 1502 void Assembler::cmpb(Address dst, int imm8) { 1503 InstructionMark im(this); 1504 prefix(dst); 1505 emit_int8((unsigned char)0x80); 1506 emit_operand(rdi, dst, 1); 1507 emit_int8(imm8); 1508 } 1509 1510 void Assembler::cmpl(Address dst, int32_t imm32) { 1511 InstructionMark im(this); 1512 prefix(dst); 1513 emit_int8((unsigned char)0x81); 1514 emit_operand(rdi, dst, 4); 1515 emit_int32(imm32); 1516 } 1517 1518 void Assembler::cmpl(Register dst, int32_t imm32) { 1519 prefix(dst); 1520 emit_arith(0x81, 0xF8, dst, imm32); 1521 } 1522 1523 void Assembler::cmpl(Register dst, Register src) { 1524 (void) prefix_and_encode(dst->encoding(), src->encoding()); 1525 emit_arith(0x3B, 0xC0, dst, src); 1526 } 1527 1528 1529 void Assembler::cmpl(Register dst, Address src) { 1530 InstructionMark im(this); 1531 prefix(src, dst); 1532 emit_int8((unsigned char)0x3B); 1533 emit_operand(dst, src); 1534 } 1535 1536 void Assembler::cmpw(Address dst, int imm16) { 1537 InstructionMark im(this); 1538 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); 1539 emit_int8(0x66); 1540 emit_int8((unsigned char)0x81); 1541 emit_operand(rdi, dst, 2); 1542 emit_int16(imm16); 1543 } 1544 1545 // The 32-bit cmpxchg compares the value at adr with the contents of rax, 1546 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1547 // The ZF is set if the compared values were equal, and cleared otherwise. 1548 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg 1549 InstructionMark im(this); 1550 prefix(adr, reg); 1551 emit_int8(0x0F); 1552 emit_int8((unsigned char)0xB1); 1553 emit_operand(reg, adr); 1554 } 1555 1556 // The 8-bit cmpxchg compares the value at adr with the contents of rax, 1557 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. 1558 // The ZF is set if the compared values were equal, and cleared otherwise. 1559 void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg 1560 InstructionMark im(this); 1561 prefix(adr, reg, true); 1562 emit_int8(0x0F); 1563 emit_int8((unsigned char)0xB0); 1564 emit_operand(reg, adr); 1565 } 1566 1567 void Assembler::comisd(XMMRegister dst, Address src) { 1568 // NOTE: dbx seems to decode this as comiss even though the 1569 // 0x66 is there. 
Strangely ucomisd comes out correct 1570 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1571 if (VM_Version::supports_evex()) { 1572 tuple_type = EVEX_T1S; 1573 input_size_in_bits = EVEX_64bit; 1574 emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true); 1575 } else { 1576 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66); 1577 } 1578 } 1579 1580 void Assembler::comisd(XMMRegister dst, XMMRegister src) { 1581 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1582 if (VM_Version::supports_evex()) { 1583 emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true); 1584 } else { 1585 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66); 1586 } 1587 } 1588 1589 void Assembler::comiss(XMMRegister dst, Address src) { 1590 if (VM_Version::supports_evex()) { 1591 tuple_type = EVEX_T1S; 1592 input_size_in_bits = EVEX_32bit; 1593 } 1594 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1595 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true); 1596 } 1597 1598 void Assembler::comiss(XMMRegister dst, XMMRegister src) { 1599 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1600 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true); 1601 } 1602 1603 void Assembler::cpuid() { 1604 emit_int8(0x0F); 1605 emit_int8((unsigned char)0xA2); 1606 } 1607 1608 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { 1609 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1610 emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3); 1611 } 1612 1613 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { 1614 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1615 emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE); 1616 } 1617 1618 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { 1619 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1620 if (VM_Version::supports_evex()) { 1621 emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2); 1622 } else { 1623 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2); 1624 } 1625 } 1626 1627 void Assembler::cvtsd2ss(XMMRegister dst, Address src) { 1628 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1629 if (VM_Version::supports_evex()) { 1630 tuple_type = EVEX_T1F; 1631 input_size_in_bits = EVEX_64bit; 1632 emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2); 1633 } else { 1634 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2); 1635 } 1636 } 1637 1638 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { 1639 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1640 int encode = 0; 1641 if (VM_Version::supports_evex()) { 1642 encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true); 1643 } else { 1644 encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, false); 1645 } 1646 emit_int8(0x2A); 1647 emit_int8((unsigned char)(0xC0 | encode)); 1648 } 1649 1650 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { 1651 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1652 if (VM_Version::supports_evex()) { 1653 tuple_type = EVEX_T1S; 1654 input_size_in_bits = EVEX_32bit; 1655 emit_simd_arith_q(0x2A, dst, src, VEX_SIMD_F2, true); 1656 } else { 1657 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2); 1658 } 1659 } 1660 1661 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { 1662 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1663 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, true); 1664 emit_int8(0x2A); 1665 emit_int8((unsigned char)(0xC0 | encode)); 1666 } 1667 1668 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { 1669 if (VM_Version::supports_evex()) { 1670 tuple_type = EVEX_T1S; 1671 input_size_in_bits =
EVEX_32bit; 1672 } 1673 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1674 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3, true); 1675 } 1676 1677 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 1678 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1679 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3, true); 1680 emit_int8(0x2A); 1681 emit_int8((unsigned char)(0xC0 | encode)); 1682 } 1683 1684 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { 1685 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1686 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3); 1687 } 1688 1689 void Assembler::cvtss2sd(XMMRegister dst, Address src) { 1690 if (VM_Version::supports_evex()) { 1691 tuple_type = EVEX_T1S; 1692 input_size_in_bits = EVEX_32bit; 1693 } 1694 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1695 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3); 1696 } 1697 1698 1699 void Assembler::cvttsd2sil(Register dst, XMMRegister src) { 1700 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1701 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true); 1702 emit_int8(0x2C); 1703 emit_int8((unsigned char)(0xC0 | encode)); 1704 } 1705 1706 void Assembler::cvttss2sil(Register dst, XMMRegister src) { 1707 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1708 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true); 1709 emit_int8(0x2C); 1710 emit_int8((unsigned char)(0xC0 | encode)); 1711 } 1712 1713 void Assembler::decl(Address dst) { 1714 // Don't use it directly. Use MacroAssembler::decrement() instead. 1715 InstructionMark im(this); 1716 prefix(dst); 1717 emit_int8((unsigned char)0xFF); 1718 emit_operand(rcx, dst); 1719 } 1720 1721 void Assembler::divsd(XMMRegister dst, Address src) { 1722 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1723 if (VM_Version::supports_evex()) { 1724 tuple_type = EVEX_T1S; 1725 input_size_in_bits = EVEX_64bit; 1726 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2); 1727 } else { 1728 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2); 1729 } 1730 } 1731 1732 void Assembler::divsd(XMMRegister dst, XMMRegister src) { 1733 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1734 if (VM_Version::supports_evex()) { 1735 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2); 1736 } else { 1737 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2); 1738 } 1739 } 1740 1741 void Assembler::divss(XMMRegister dst, Address src) { 1742 if (VM_Version::supports_evex()) { 1743 tuple_type = EVEX_T1S; 1744 input_size_in_bits = EVEX_32bit; 1745 } 1746 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1747 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3); 1748 } 1749 1750 void Assembler::divss(XMMRegister dst, XMMRegister src) { 1751 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1752 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3); 1753 } 1754 1755 void Assembler::emms() { 1756 NOT_LP64(assert(VM_Version::supports_mmx(), "")); 1757 emit_int8(0x0F); 1758 emit_int8(0x77); 1759 } 1760 1761 void Assembler::hlt() { 1762 emit_int8((unsigned char)0xF4); 1763 } 1764 1765 void Assembler::idivl(Register src) { 1766 int encode = prefix_and_encode(src->encoding()); 1767 emit_int8((unsigned char)0xF7); 1768 emit_int8((unsigned char)(0xF8 | encode)); 1769 } 1770 1771 void Assembler::divl(Register src) { // Unsigned 1772 int encode = prefix_and_encode(src->encoding()); 1773 emit_int8((unsigned char)0xF7); 1774 emit_int8((unsigned char)(0xF0 | encode)); 1775 } 1776 1777 void Assembler::imull(Register dst, Register src) { 1778 int encode = 
prefix_and_encode(dst->encoding(), src->encoding()); 1779 emit_int8(0x0F); 1780 emit_int8((unsigned char)0xAF); 1781 emit_int8((unsigned char)(0xC0 | encode)); 1782 } 1783 1784 1785 void Assembler::imull(Register dst, Register src, int value) { 1786 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1787 if (is8bit(value)) { 1788 emit_int8(0x6B); 1789 emit_int8((unsigned char)(0xC0 | encode)); 1790 emit_int8(value & 0xFF); 1791 } else { 1792 emit_int8(0x69); 1793 emit_int8((unsigned char)(0xC0 | encode)); 1794 emit_int32(value); 1795 } 1796 } 1797 1798 void Assembler::imull(Register dst, Address src) { 1799 InstructionMark im(this); 1800 prefix(src, dst); 1801 emit_int8(0x0F); 1802 emit_int8((unsigned char) 0xAF); 1803 emit_operand(dst, src); 1804 } 1805 1806 1807 void Assembler::incl(Address dst) { 1808 // Don't use it directly. Use MacroAssembler::increment() instead. 1809 InstructionMark im(this); 1810 prefix(dst); 1811 emit_int8((unsigned char)0xFF); 1812 emit_operand(rax, dst); 1813 } 1814 1815 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) { 1816 InstructionMark im(this); 1817 assert((0 <= cc) && (cc < 16), "illegal cc"); 1818 if (L.is_bound()) { 1819 address dst = target(L); 1820 assert(dst != NULL, "jcc most probably wrong"); 1821 1822 const int short_size = 2; 1823 const int long_size = 6; 1824 intptr_t offs = (intptr_t)dst - (intptr_t)pc(); 1825 if (maybe_short && is8bit(offs - short_size)) { 1826 // 0111 tttn #8-bit disp 1827 emit_int8(0x70 | cc); 1828 emit_int8((offs - short_size) & 0xFF); 1829 } else { 1830 // 0000 1111 1000 tttn #32-bit disp 1831 assert(is_simm32(offs - long_size), 1832 "must be 32bit offset (call4)"); 1833 emit_int8(0x0F); 1834 emit_int8((unsigned char)(0x80 | cc)); 1835 emit_int32(offs - long_size); 1836 } 1837 } else { 1838 // Note: could eliminate cond. jumps to this jump if condition 1839 // is the same however, seems to be rather unlikely case. 1840 // Note: use jccb() if label to be bound is very close to get 1841 // an 8-bit displacement 1842 L.add_patch_at(code(), locator()); 1843 emit_int8(0x0F); 1844 emit_int8((unsigned char)(0x80 | cc)); 1845 emit_int32(0); 1846 } 1847 } 1848 1849 void Assembler::jccb(Condition cc, Label& L) { 1850 if (L.is_bound()) { 1851 const int short_size = 2; 1852 address entry = target(L); 1853 #ifdef ASSERT 1854 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 1855 intptr_t delta = short_branch_delta(); 1856 if (delta != 0) { 1857 dist += (dist < 0 ? 
(-delta) :delta); 1858 } 1859 assert(is8bit(dist), "Displacement too large for a short jmp"); 1860 #endif 1861 intptr_t offs = (intptr_t)entry - (intptr_t)pc(); 1862 // 0111 tttn #8-bit disp 1863 emit_int8(0x70 | cc); 1864 emit_int8((offs - short_size) & 0xFF); 1865 } else { 1866 InstructionMark im(this); 1867 L.add_patch_at(code(), locator()); 1868 emit_int8(0x70 | cc); 1869 emit_int8(0); 1870 } 1871 } 1872 1873 void Assembler::jmp(Address adr) { 1874 InstructionMark im(this); 1875 prefix(adr); 1876 emit_int8((unsigned char)0xFF); 1877 emit_operand(rsp, adr); 1878 } 1879 1880 void Assembler::jmp(Label& L, bool maybe_short) { 1881 if (L.is_bound()) { 1882 address entry = target(L); 1883 assert(entry != NULL, "jmp most probably wrong"); 1884 InstructionMark im(this); 1885 const int short_size = 2; 1886 const int long_size = 5; 1887 intptr_t offs = entry - pc(); 1888 if (maybe_short && is8bit(offs - short_size)) { 1889 emit_int8((unsigned char)0xEB); 1890 emit_int8((offs - short_size) & 0xFF); 1891 } else { 1892 emit_int8((unsigned char)0xE9); 1893 emit_int32(offs - long_size); 1894 } 1895 } else { 1896 // By default, forward jumps are always 32-bit displacements, since 1897 // we can't yet know where the label will be bound. If you're sure that 1898 // the forward jump will not run beyond 256 bytes, use jmpb to 1899 // force an 8-bit displacement. 1900 InstructionMark im(this); 1901 L.add_patch_at(code(), locator()); 1902 emit_int8((unsigned char)0xE9); 1903 emit_int32(0); 1904 } 1905 } 1906 1907 void Assembler::jmp(Register entry) { 1908 int encode = prefix_and_encode(entry->encoding()); 1909 emit_int8((unsigned char)0xFF); 1910 emit_int8((unsigned char)(0xE0 | encode)); 1911 } 1912 1913 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { 1914 InstructionMark im(this); 1915 emit_int8((unsigned char)0xE9); 1916 assert(dest != NULL, "must have a target"); 1917 intptr_t disp = dest - (pc() + sizeof(int32_t)); 1918 assert(is_simm32(disp), "must be 32bit offset (jmp)"); 1919 emit_data(disp, rspec.reloc(), call32_operand); 1920 } 1921 1922 void Assembler::jmpb(Label& L) { 1923 if (L.is_bound()) { 1924 const int short_size = 2; 1925 address entry = target(L); 1926 assert(entry != NULL, "jmp most probably wrong"); 1927 #ifdef ASSERT 1928 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size); 1929 intptr_t delta = short_branch_delta(); 1930 if (delta != 0) { 1931 dist += (dist < 0 ?
(-delta) :delta); 1932 } 1933 assert(is8bit(dist), "Dispacement too large for a short jmp"); 1934 #endif 1935 intptr_t offs = entry - pc(); 1936 emit_int8((unsigned char)0xEB); 1937 emit_int8((offs - short_size) & 0xFF); 1938 } else { 1939 InstructionMark im(this); 1940 L.add_patch_at(code(), locator()); 1941 emit_int8((unsigned char)0xEB); 1942 emit_int8(0); 1943 } 1944 } 1945 1946 void Assembler::ldmxcsr( Address src) { 1947 NOT_LP64(assert(VM_Version::supports_sse(), "")); 1948 InstructionMark im(this); 1949 prefix(src); 1950 emit_int8(0x0F); 1951 emit_int8((unsigned char)0xAE); 1952 emit_operand(as_Register(2), src); 1953 } 1954 1955 void Assembler::leal(Register dst, Address src) { 1956 InstructionMark im(this); 1957 #ifdef _LP64 1958 emit_int8(0x67); // addr32 1959 prefix(src, dst); 1960 #endif // LP64 1961 emit_int8((unsigned char)0x8D); 1962 emit_operand(dst, src); 1963 } 1964 1965 void Assembler::lfence() { 1966 emit_int8(0x0F); 1967 emit_int8((unsigned char)0xAE); 1968 emit_int8((unsigned char)0xE8); 1969 } 1970 1971 void Assembler::lock() { 1972 emit_int8((unsigned char)0xF0); 1973 } 1974 1975 void Assembler::lzcntl(Register dst, Register src) { 1976 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 1977 emit_int8((unsigned char)0xF3); 1978 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 1979 emit_int8(0x0F); 1980 emit_int8((unsigned char)0xBD); 1981 emit_int8((unsigned char)(0xC0 | encode)); 1982 } 1983 1984 // Emit mfence instruction 1985 void Assembler::mfence() { 1986 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) 1987 emit_int8(0x0F); 1988 emit_int8((unsigned char)0xAE); 1989 emit_int8((unsigned char)0xF0); 1990 } 1991 1992 void Assembler::mov(Register dst, Register src) { 1993 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 1994 } 1995 1996 void Assembler::movapd(XMMRegister dst, XMMRegister src) { 1997 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1998 if (VM_Version::supports_evex()) { 1999 emit_simd_arith_nonds_q(0x28, dst, src, VEX_SIMD_66, true); 2000 } else { 2001 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66); 2002 } 2003 } 2004 2005 void Assembler::movaps(XMMRegister dst, XMMRegister src) { 2006 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2007 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE); 2008 } 2009 2010 void Assembler::movlhps(XMMRegister dst, XMMRegister src) { 2011 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2012 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, true, VEX_OPCODE_0F, 2013 false, AVX_128bit); 2014 emit_int8(0x16); 2015 emit_int8((unsigned char)(0xC0 | encode)); 2016 } 2017 2018 void Assembler::movb(Register dst, Address src) { 2019 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 2020 InstructionMark im(this); 2021 prefix(src, dst, true); 2022 emit_int8((unsigned char)0x8A); 2023 emit_operand(dst, src); 2024 } 2025 2026 void Assembler::kmovq(KRegister dst, KRegister src) { 2027 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2028 int encode = kreg_prefix_and_encode(dst, knoreg, src, VEX_SIMD_NONE, 2029 true, VEX_OPCODE_0F, true); 2030 emit_int8((unsigned char)0x90); 2031 emit_int8((unsigned char)(0xC0 | encode)); 2032 } 2033 2034 void Assembler::kmovq(KRegister dst, Address src) { 2035 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2036 int dst_enc = dst->encoding(); 2037 int nds_enc = 0; 2038 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_NONE, 2039 VEX_OPCODE_0F, true, AVX_128bit, true, true); 2040 emit_int8((unsigned 
char)0x90); 2041 emit_operand((Register)dst, src); 2042 } 2043 2044 void Assembler::kmovq(Address dst, KRegister src) { 2045 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2046 int src_enc = src->encoding(); 2047 int nds_enc = 0; 2048 vex_prefix(dst, nds_enc, src_enc, VEX_SIMD_NONE, 2049 VEX_OPCODE_0F, true, AVX_128bit, true, true); 2050 emit_int8((unsigned char)0x90); 2051 emit_operand((Register)src, dst); 2052 } 2053 2054 void Assembler::kmovql(KRegister dst, Register src) { 2055 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2056 bool supports_bw = VM_Version::supports_avx512bw(); 2057 VexSimdPrefix pre = supports_bw ? VEX_SIMD_F2 : VEX_SIMD_NONE; 2058 int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true, 2059 VEX_OPCODE_0F, supports_bw); 2060 emit_int8((unsigned char)0x92); 2061 emit_int8((unsigned char)(0xC0 | encode)); 2062 } 2063 2064 void Assembler::kmovdl(KRegister dst, Register src) { 2065 NOT_LP64(assert(VM_Version::supports_evex(), "")); 2066 VexSimdPrefix pre = VM_Version::supports_avx512bw() ? VEX_SIMD_F2 : VEX_SIMD_NONE; 2067 int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true, VEX_OPCODE_0F, false); 2068 emit_int8((unsigned char)0x92); 2069 emit_int8((unsigned char)(0xC0 | encode)); 2070 } 2071 2072 void Assembler::movb(Address dst, int imm8) { 2073 InstructionMark im(this); 2074 prefix(dst); 2075 emit_int8((unsigned char)0xC6); 2076 emit_operand(rax, dst, 1); 2077 emit_int8(imm8); 2078 } 2079 2080 2081 void Assembler::movb(Address dst, Register src) { 2082 assert(src->has_byte_register(), "must have byte register"); 2083 InstructionMark im(this); 2084 prefix(dst, src, true); 2085 emit_int8((unsigned char)0x88); 2086 emit_operand(src, dst); 2087 } 2088 2089 void Assembler::movdl(XMMRegister dst, Register src) { 2090 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2091 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66, true); 2092 emit_int8(0x6E); 2093 emit_int8((unsigned char)(0xC0 | encode)); 2094 } 2095 2096 void Assembler::movdl(Register dst, XMMRegister src) { 2097 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2098 // swap src/dst to get correct prefix 2099 int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66, true); 2100 emit_int8(0x7E); 2101 emit_int8((unsigned char)(0xC0 | encode)); 2102 } 2103 2104 void Assembler::movdl(XMMRegister dst, Address src) { 2105 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2106 if (VM_Version::supports_evex()) { 2107 tuple_type = EVEX_T1S; 2108 input_size_in_bits = EVEX_32bit; 2109 } 2110 InstructionMark im(this); 2111 simd_prefix(dst, src, VEX_SIMD_66, true, VEX_OPCODE_0F); 2112 emit_int8(0x6E); 2113 emit_operand(dst, src); 2114 } 2115 2116 void Assembler::movdl(Address dst, XMMRegister src) { 2117 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2118 if (VM_Version::supports_evex()) { 2119 tuple_type = EVEX_T1S; 2120 input_size_in_bits = EVEX_32bit; 2121 } 2122 InstructionMark im(this); 2123 simd_prefix(dst, src, VEX_SIMD_66, true); 2124 emit_int8(0x7E); 2125 emit_operand(src, dst); 2126 } 2127 2128 void Assembler::movdqa(XMMRegister dst, XMMRegister src) { 2129 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2130 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 2131 } 2132 2133 void Assembler::movdqa(XMMRegister dst, Address src) { 2134 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2135 if (VM_Version::supports_evex()) { 2136 tuple_type = EVEX_FVM; 2137 } 2138 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 2139 } 2140 2141 void Assembler::movdqu(XMMRegister 
dst, Address src) { 2142 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2143 if (VM_Version::supports_evex()) { 2144 tuple_type = EVEX_FVM; 2145 } 2146 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); 2147 } 2148 2149 void Assembler::movdqu(XMMRegister dst, XMMRegister src) { 2150 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2151 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); 2152 } 2153 2154 void Assembler::movdqu(Address dst, XMMRegister src) { 2155 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2156 if (VM_Version::supports_evex()) { 2157 tuple_type = EVEX_FVM; 2158 } 2159 InstructionMark im(this); 2160 simd_prefix(dst, src, VEX_SIMD_F3, false); 2161 emit_int8(0x7F); 2162 emit_operand(src, dst); 2163 } 2164 2165 // Move Unaligned 256bit Vector 2166 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2167 assert(UseAVX > 0, ""); 2168 if (VM_Version::supports_evex()) { 2169 tuple_type = EVEX_FVM; 2170 } 2171 int vector_len = AVX_256bit; 2172 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector_len); 2173 emit_int8(0x6F); 2174 emit_int8((unsigned char)(0xC0 | encode)); 2175 } 2176 2177 void Assembler::vmovdqu(XMMRegister dst, Address src) { 2178 assert(UseAVX > 0, ""); 2179 if (VM_Version::supports_evex()) { 2180 tuple_type = EVEX_FVM; 2181 } 2182 InstructionMark im(this); 2183 int vector_len = AVX_256bit; 2184 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2185 emit_int8(0x6F); 2186 emit_operand(dst, src); 2187 } 2188 2189 void Assembler::vmovdqu(Address dst, XMMRegister src) { 2190 assert(UseAVX > 0, ""); 2191 if (VM_Version::supports_evex()) { 2192 tuple_type = EVEX_FVM; 2193 } 2194 InstructionMark im(this); 2195 int vector_len = AVX_256bit; 2196 // swap src<->dst for encoding 2197 assert(src != xnoreg, "sanity"); 2198 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2199 emit_int8(0x7F); 2200 emit_operand(src, dst); 2201 } 2202 2203 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64) 2204 void Assembler::evmovdqu(XMMRegister dst, XMMRegister src, int vector_len) { 2205 assert(UseAVX > 0, ""); 2206 int src_enc = src->encoding(); 2207 int dst_enc = dst->encoding(); 2208 int encode = vex_prefix_and_encode(dst_enc, 0, src_enc, VEX_SIMD_F3, VEX_OPCODE_0F, 2209 true, vector_len, false, false); 2210 emit_int8(0x6F); 2211 emit_int8((unsigned char)(0xC0 | encode)); 2212 } 2213 2214 void Assembler::evmovdqu(XMMRegister dst, Address src, int vector_len) { 2215 assert(UseAVX > 0, ""); 2216 InstructionMark im(this); 2217 if (VM_Version::supports_evex()) { 2218 tuple_type = EVEX_FVM; 2219 vex_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2220 } else { 2221 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false); 2222 } 2223 emit_int8(0x6F); 2224 emit_operand(dst, src); 2225 } 2226 2227 void Assembler::evmovdqu(Address dst, XMMRegister src, int vector_len) { 2228 assert(UseAVX > 0, ""); 2229 InstructionMark im(this); 2230 assert(src != xnoreg, "sanity"); 2231 if (VM_Version::supports_evex()) { 2232 tuple_type = EVEX_FVM; 2233 // swap src<->dst for encoding 2234 vex_prefix_q(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2235 } else { 2236 // swap src<->dst for encoding 2237 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false); 2238 } 2239 emit_int8(0x7F); 2240 emit_operand(src, dst); 2241 } 2242 2243 // Uses zero extension on 64bit 2244 2245 void Assembler::movl(Register dst, int32_t imm32) { 2246 int encode = prefix_and_encode(dst->encoding()); 2247 emit_int8((unsigned 
char)(0xB8 | encode)); 2248 emit_int32(imm32); 2249 } 2250 2251 void Assembler::movl(Register dst, Register src) { 2252 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2253 emit_int8((unsigned char)0x8B); 2254 emit_int8((unsigned char)(0xC0 | encode)); 2255 } 2256 2257 void Assembler::movl(Register dst, Address src) { 2258 InstructionMark im(this); 2259 prefix(src, dst); 2260 emit_int8((unsigned char)0x8B); 2261 emit_operand(dst, src); 2262 } 2263 2264 void Assembler::movl(Address dst, int32_t imm32) { 2265 InstructionMark im(this); 2266 prefix(dst); 2267 emit_int8((unsigned char)0xC7); 2268 emit_operand(rax, dst, 4); 2269 emit_int32(imm32); 2270 } 2271 2272 void Assembler::movl(Address dst, Register src) { 2273 InstructionMark im(this); 2274 prefix(dst, src); 2275 emit_int8((unsigned char)0x89); 2276 emit_operand(src, dst); 2277 } 2278 2279 // New cpus require to use movsd and movss to avoid partial register stall 2280 // when loading from memory. But for old Opteron use movlpd instead of movsd. 2281 // The selection is done in MacroAssembler::movdbl() and movflt(). 2282 void Assembler::movlpd(XMMRegister dst, Address src) { 2283 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2284 if (VM_Version::supports_evex()) { 2285 tuple_type = EVEX_T1S; 2286 input_size_in_bits = EVEX_32bit; 2287 } 2288 emit_simd_arith(0x12, dst, src, VEX_SIMD_66, true); 2289 } 2290 2291 void Assembler::movq( MMXRegister dst, Address src ) { 2292 assert( VM_Version::supports_mmx(), "" ); 2293 emit_int8(0x0F); 2294 emit_int8(0x6F); 2295 emit_operand(dst, src); 2296 } 2297 2298 void Assembler::movq( Address dst, MMXRegister src ) { 2299 assert( VM_Version::supports_mmx(), "" ); 2300 emit_int8(0x0F); 2301 emit_int8(0x7F); 2302 // workaround gcc (3.2.1-7a) bug 2303 // In that version of gcc with only an emit_operand(MMX, Address) 2304 // gcc will tail jump and try and reverse the parameters completely 2305 // obliterating dst in the process. By having a version available 2306 // that doesn't need to swap the args at the tail jump the bug is 2307 // avoided. 
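// The raw MMX forms here use the bare 0x0F 0x6F (load) / 0x0F 0x7F (store) opcodes with
// no SIMD prefix; the XMM movq overloads that follow use the prefixed encodings
// F3 0F 7E (load into an xmm register) and 66 0F D6 (store from an xmm register).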
2308 emit_operand(dst, src); 2309 } 2310 2311 void Assembler::movq(XMMRegister dst, Address src) { 2312 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2313 InstructionMark im(this); 2314 if (VM_Version::supports_evex()) { 2315 tuple_type = EVEX_T1S; 2316 input_size_in_bits = EVEX_64bit; 2317 simd_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, true); 2318 } else { 2319 simd_prefix(dst, src, VEX_SIMD_F3, true, VEX_OPCODE_0F); 2320 } 2321 emit_int8(0x7E); 2322 emit_operand(dst, src); 2323 } 2324 2325 void Assembler::movq(Address dst, XMMRegister src) { 2326 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2327 InstructionMark im(this); 2328 if (VM_Version::supports_evex()) { 2329 tuple_type = EVEX_T1S; 2330 input_size_in_bits = EVEX_64bit; 2331 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, true, 2332 VEX_OPCODE_0F, true, AVX_128bit); 2333 } else { 2334 simd_prefix(dst, src, VEX_SIMD_66, true); 2335 } 2336 emit_int8((unsigned char)0xD6); 2337 emit_operand(src, dst); 2338 } 2339 2340 void Assembler::movsbl(Register dst, Address src) { // movsxb 2341 InstructionMark im(this); 2342 prefix(src, dst); 2343 emit_int8(0x0F); 2344 emit_int8((unsigned char)0xBE); 2345 emit_operand(dst, src); 2346 } 2347 2348 void Assembler::movsbl(Register dst, Register src) { // movsxb 2349 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2350 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); 2351 emit_int8(0x0F); 2352 emit_int8((unsigned char)0xBE); 2353 emit_int8((unsigned char)(0xC0 | encode)); 2354 } 2355 2356 void Assembler::movsd(XMMRegister dst, XMMRegister src) { 2357 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2358 if (VM_Version::supports_evex()) { 2359 emit_simd_arith_q(0x10, dst, src, VEX_SIMD_F2, true); 2360 } else { 2361 emit_simd_arith(0x10, dst, src, VEX_SIMD_F2); 2362 } 2363 } 2364 2365 void Assembler::movsd(XMMRegister dst, Address src) { 2366 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2367 if (VM_Version::supports_evex()) { 2368 tuple_type = EVEX_T1S; 2369 input_size_in_bits = EVEX_64bit; 2370 emit_simd_arith_nonds_q(0x10, dst, src, VEX_SIMD_F2, true); 2371 } else { 2372 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2); 2373 } 2374 } 2375 2376 void Assembler::movsd(Address dst, XMMRegister src) { 2377 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2378 InstructionMark im(this); 2379 if (VM_Version::supports_evex()) { 2380 tuple_type = EVEX_T1S; 2381 input_size_in_bits = EVEX_64bit; 2382 simd_prefix_q(src, xnoreg, dst, VEX_SIMD_F2); 2383 } else { 2384 simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, false); 2385 } 2386 emit_int8(0x11); 2387 emit_operand(src, dst); 2388 } 2389 2390 void Assembler::movss(XMMRegister dst, XMMRegister src) { 2391 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2392 emit_simd_arith(0x10, dst, src, VEX_SIMD_F3, true); 2393 } 2394 2395 void Assembler::movss(XMMRegister dst, Address src) { 2396 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2397 if (VM_Version::supports_evex()) { 2398 tuple_type = EVEX_T1S; 2399 input_size_in_bits = EVEX_32bit; 2400 } 2401 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3, true); 2402 } 2403 2404 void Assembler::movss(Address dst, XMMRegister src) { 2405 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2406 if (VM_Version::supports_evex()) { 2407 tuple_type = EVEX_T1S; 2408 input_size_in_bits = EVEX_32bit; 2409 } 2410 InstructionMark im(this); 2411 simd_prefix(dst, src, VEX_SIMD_F3, false); 2412 emit_int8(0x11); 2413 emit_operand(src, dst); 2414 } 2415 2416 void 
Assembler::movswl(Register dst, Address src) { // movsxw 2417 InstructionMark im(this); 2418 prefix(src, dst); 2419 emit_int8(0x0F); 2420 emit_int8((unsigned char)0xBF); 2421 emit_operand(dst, src); 2422 } 2423 2424 void Assembler::movswl(Register dst, Register src) { // movsxw 2425 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2426 emit_int8(0x0F); 2427 emit_int8((unsigned char)0xBF); 2428 emit_int8((unsigned char)(0xC0 | encode)); 2429 } 2430 2431 void Assembler::movw(Address dst, int imm16) { 2432 InstructionMark im(this); 2433 2434 emit_int8(0x66); // switch to 16-bit mode 2435 prefix(dst); 2436 emit_int8((unsigned char)0xC7); 2437 emit_operand(rax, dst, 2); 2438 emit_int16(imm16); 2439 } 2440 2441 void Assembler::movw(Register dst, Address src) { 2442 InstructionMark im(this); 2443 emit_int8(0x66); 2444 prefix(src, dst); 2445 emit_int8((unsigned char)0x8B); 2446 emit_operand(dst, src); 2447 } 2448 2449 void Assembler::movw(Address dst, Register src) { 2450 InstructionMark im(this); 2451 emit_int8(0x66); 2452 prefix(dst, src); 2453 emit_int8((unsigned char)0x89); 2454 emit_operand(src, dst); 2455 } 2456 2457 void Assembler::movzbl(Register dst, Address src) { // movzxb 2458 InstructionMark im(this); 2459 prefix(src, dst); 2460 emit_int8(0x0F); 2461 emit_int8((unsigned char)0xB6); 2462 emit_operand(dst, src); 2463 } 2464 2465 void Assembler::movzbl(Register dst, Register src) { // movzxb 2466 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); 2467 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); 2468 emit_int8(0x0F); 2469 emit_int8((unsigned char)0xB6); 2470 emit_int8(0xC0 | encode); 2471 } 2472 2473 void Assembler::movzwl(Register dst, Address src) { // movzxw 2474 InstructionMark im(this); 2475 prefix(src, dst); 2476 emit_int8(0x0F); 2477 emit_int8((unsigned char)0xB7); 2478 emit_operand(dst, src); 2479 } 2480 2481 void Assembler::movzwl(Register dst, Register src) { // movzxw 2482 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2483 emit_int8(0x0F); 2484 emit_int8((unsigned char)0xB7); 2485 emit_int8(0xC0 | encode); 2486 } 2487 2488 void Assembler::mull(Address src) { 2489 InstructionMark im(this); 2490 prefix(src); 2491 emit_int8((unsigned char)0xF7); 2492 emit_operand(rsp, src); 2493 } 2494 2495 void Assembler::mull(Register src) { 2496 int encode = prefix_and_encode(src->encoding()); 2497 emit_int8((unsigned char)0xF7); 2498 emit_int8((unsigned char)(0xE0 | encode)); 2499 } 2500 2501 void Assembler::mulsd(XMMRegister dst, Address src) { 2502 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2503 if (VM_Version::supports_evex()) { 2504 tuple_type = EVEX_T1S; 2505 input_size_in_bits = EVEX_64bit; 2506 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2); 2507 } else { 2508 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2); 2509 } 2510 } 2511 2512 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 2513 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2514 if (VM_Version::supports_evex()) { 2515 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2); 2516 } else { 2517 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2); 2518 } 2519 } 2520 2521 void Assembler::mulss(XMMRegister dst, Address src) { 2522 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2523 if (VM_Version::supports_evex()) { 2524 tuple_type = EVEX_T1S; 2525 input_size_in_bits = EVEX_32bit; 2526 } 2527 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3); 2528 } 2529 2530 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 2531 
NOT_LP64(assert(VM_Version::supports_sse(), "")); 2532 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3); 2533 } 2534 2535 void Assembler::negl(Register dst) { 2536 int encode = prefix_and_encode(dst->encoding()); 2537 emit_int8((unsigned char)0xF7); 2538 emit_int8((unsigned char)(0xD8 | encode)); 2539 } 2540 2541 void Assembler::nop(int i) { 2542 #ifdef ASSERT 2543 assert(i > 0, " "); 2544 // The fancy nops aren't currently recognized by debuggers making it a 2545 // pain to disassemble code while debugging. If asserts are on clearly 2546 // speed is not an issue so simply use the single byte traditional nop 2547 // to do alignment. 2548 2549 for (; i > 0 ; i--) emit_int8((unsigned char)0x90); 2550 return; 2551 2552 #endif // ASSERT 2553 2554 if (UseAddressNop && VM_Version::is_intel()) { 2555 // 2556 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel 2557 // 1: 0x90 2558 // 2: 0x66 0x90 2559 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2560 // 4: 0x0F 0x1F 0x40 0x00 2561 // 5: 0x0F 0x1F 0x44 0x00 0x00 2562 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2563 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2564 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2565 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2566 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2567 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2568 2569 // The rest coding is Intel specific - don't use consecutive address nops 2570 2571 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2572 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2573 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2574 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2575 2576 while(i >= 15) { 2577 // For Intel don't generate consecutive addess nops (mix with regular nops) 2578 i -= 15; 2579 emit_int8(0x66); // size prefix 2580 emit_int8(0x66); // size prefix 2581 emit_int8(0x66); // size prefix 2582 addr_nop_8(); 2583 emit_int8(0x66); // size prefix 2584 emit_int8(0x66); // size prefix 2585 emit_int8(0x66); // size prefix 2586 emit_int8((unsigned char)0x90); 2587 // nop 2588 } 2589 switch (i) { 2590 case 14: 2591 emit_int8(0x66); // size prefix 2592 case 13: 2593 emit_int8(0x66); // size prefix 2594 case 12: 2595 addr_nop_8(); 2596 emit_int8(0x66); // size prefix 2597 emit_int8(0x66); // size prefix 2598 emit_int8(0x66); // size prefix 2599 emit_int8((unsigned char)0x90); 2600 // nop 2601 break; 2602 case 11: 2603 emit_int8(0x66); // size prefix 2604 case 10: 2605 emit_int8(0x66); // size prefix 2606 case 9: 2607 emit_int8(0x66); // size prefix 2608 case 8: 2609 addr_nop_8(); 2610 break; 2611 case 7: 2612 addr_nop_7(); 2613 break; 2614 case 6: 2615 emit_int8(0x66); // size prefix 2616 case 5: 2617 addr_nop_5(); 2618 break; 2619 case 4: 2620 addr_nop_4(); 2621 break; 2622 case 3: 2623 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 2624 emit_int8(0x66); // size prefix 2625 case 2: 2626 emit_int8(0x66); // size prefix 2627 case 1: 2628 emit_int8((unsigned char)0x90); 2629 // nop 2630 break; 2631 default: 2632 assert(i == 0, " "); 2633 } 2634 return; 2635 } 2636 if (UseAddressNop && VM_Version::is_amd()) { 2637 // 2638 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. 
2639 // 1: 0x90 2640 // 2: 0x66 0x90 2641 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2642 // 4: 0x0F 0x1F 0x40 0x00 2643 // 5: 0x0F 0x1F 0x44 0x00 0x00 2644 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2645 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2646 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2647 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2648 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2649 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2650 2651 // The rest coding is AMD specific - use consecutive address nops 2652 2653 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2654 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 2655 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2656 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2657 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2658 // Size prefixes (0x66) are added for larger sizes 2659 2660 while(i >= 22) { 2661 i -= 11; 2662 emit_int8(0x66); // size prefix 2663 emit_int8(0x66); // size prefix 2664 emit_int8(0x66); // size prefix 2665 addr_nop_8(); 2666 } 2667 // Generate first nop for size between 21-12 2668 switch (i) { 2669 case 21: 2670 i -= 1; 2671 emit_int8(0x66); // size prefix 2672 case 20: 2673 case 19: 2674 i -= 1; 2675 emit_int8(0x66); // size prefix 2676 case 18: 2677 case 17: 2678 i -= 1; 2679 emit_int8(0x66); // size prefix 2680 case 16: 2681 case 15: 2682 i -= 8; 2683 addr_nop_8(); 2684 break; 2685 case 14: 2686 case 13: 2687 i -= 7; 2688 addr_nop_7(); 2689 break; 2690 case 12: 2691 i -= 6; 2692 emit_int8(0x66); // size prefix 2693 addr_nop_5(); 2694 break; 2695 default: 2696 assert(i < 12, " "); 2697 } 2698 2699 // Generate second nop for size between 11-1 2700 switch (i) { 2701 case 11: 2702 emit_int8(0x66); // size prefix 2703 case 10: 2704 emit_int8(0x66); // size prefix 2705 case 9: 2706 emit_int8(0x66); // size prefix 2707 case 8: 2708 addr_nop_8(); 2709 break; 2710 case 7: 2711 addr_nop_7(); 2712 break; 2713 case 6: 2714 emit_int8(0x66); // size prefix 2715 case 5: 2716 addr_nop_5(); 2717 break; 2718 case 4: 2719 addr_nop_4(); 2720 break; 2721 case 3: 2722 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 2723 emit_int8(0x66); // size prefix 2724 case 2: 2725 emit_int8(0x66); // size prefix 2726 case 1: 2727 emit_int8((unsigned char)0x90); 2728 // nop 2729 break; 2730 default: 2731 assert(i == 0, " "); 2732 } 2733 return; 2734 } 2735 2736 // Using nops with size prefixes "0x66 0x90". 
2737 // From AMD Optimization Guide: 2738 // 1: 0x90 2739 // 2: 0x66 0x90 2740 // 3: 0x66 0x66 0x90 2741 // 4: 0x66 0x66 0x66 0x90 2742 // 5: 0x66 0x66 0x90 0x66 0x90 2743 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 2744 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 2745 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 2746 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 2747 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 2748 // 2749 while(i > 12) { 2750 i -= 4; 2751 emit_int8(0x66); // size prefix 2752 emit_int8(0x66); 2753 emit_int8(0x66); 2754 emit_int8((unsigned char)0x90); 2755 // nop 2756 } 2757 // 1 - 12 nops 2758 if(i > 8) { 2759 if(i > 9) { 2760 i -= 1; 2761 emit_int8(0x66); 2762 } 2763 i -= 3; 2764 emit_int8(0x66); 2765 emit_int8(0x66); 2766 emit_int8((unsigned char)0x90); 2767 } 2768 // 1 - 8 nops 2769 if(i > 4) { 2770 if(i > 6) { 2771 i -= 1; 2772 emit_int8(0x66); 2773 } 2774 i -= 3; 2775 emit_int8(0x66); 2776 emit_int8(0x66); 2777 emit_int8((unsigned char)0x90); 2778 } 2779 switch (i) { 2780 case 4: 2781 emit_int8(0x66); 2782 case 3: 2783 emit_int8(0x66); 2784 case 2: 2785 emit_int8(0x66); 2786 case 1: 2787 emit_int8((unsigned char)0x90); 2788 break; 2789 default: 2790 assert(i == 0, " "); 2791 } 2792 } 2793 2794 void Assembler::notl(Register dst) { 2795 int encode = prefix_and_encode(dst->encoding()); 2796 emit_int8((unsigned char)0xF7); 2797 emit_int8((unsigned char)(0xD0 | encode)); 2798 } 2799 2800 void Assembler::orl(Address dst, int32_t imm32) { 2801 InstructionMark im(this); 2802 prefix(dst); 2803 emit_arith_operand(0x81, rcx, dst, imm32); 2804 } 2805 2806 void Assembler::orl(Register dst, int32_t imm32) { 2807 prefix(dst); 2808 emit_arith(0x81, 0xC8, dst, imm32); 2809 } 2810 2811 void Assembler::orl(Register dst, Address src) { 2812 InstructionMark im(this); 2813 prefix(src, dst); 2814 emit_int8(0x0B); 2815 emit_operand(dst, src); 2816 } 2817 2818 void Assembler::orl(Register dst, Register src) { 2819 (void) prefix_and_encode(dst->encoding(), src->encoding()); 2820 emit_arith(0x0B, 0xC0, dst, src); 2821 } 2822 2823 void Assembler::orl(Address dst, Register src) { 2824 InstructionMark im(this); 2825 prefix(dst, src); 2826 emit_int8(0x09); 2827 emit_operand(src, dst); 2828 } 2829 2830 void Assembler::packuswb(XMMRegister dst, Address src) { 2831 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2832 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 2833 if (VM_Version::supports_evex()) { 2834 tuple_type = EVEX_FV; 2835 input_size_in_bits = EVEX_32bit; 2836 } 2837 emit_simd_arith(0x67, dst, src, VEX_SIMD_66, 2838 false, (VM_Version::supports_avx512dq() == false)); 2839 } 2840 2841 void Assembler::packuswb(XMMRegister dst, XMMRegister src) { 2842 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2843 emit_simd_arith(0x67, dst, src, VEX_SIMD_66, 2844 false, (VM_Version::supports_avx512dq() == false)); 2845 } 2846 2847 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2848 assert(UseAVX > 0, "some form of AVX must be enabled"); 2849 emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector_len, 2850 false, (VM_Version::supports_avx512dq() == false)); 2851 } 2852 2853 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 2854 assert(VM_Version::supports_avx2(), ""); 2855 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 2856 VEX_OPCODE_0F_3A, true, vector_len); 2857 emit_int8(0x00); 2858 emit_int8(0xC0 | encode); 2859 emit_int8(imm8); 2860 } 2861 2862 void 
Assembler::pause() { 2863 emit_int8((unsigned char)0xF3); 2864 emit_int8((unsigned char)0x90); 2865 } 2866 2867 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 2868 assert(VM_Version::supports_sse4_2(), ""); 2869 InstructionMark im(this); 2870 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_3A, 2871 false, AVX_128bit, true); 2872 emit_int8(0x61); 2873 emit_operand(dst, src); 2874 emit_int8(imm8); 2875 } 2876 2877 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 2878 assert(VM_Version::supports_sse4_2(), ""); 2879 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 2880 VEX_OPCODE_0F_3A, false, AVX_128bit, true); 2881 emit_int8(0x61); 2882 emit_int8((unsigned char)(0xC0 | encode)); 2883 emit_int8(imm8); 2884 } 2885 2886 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { 2887 assert(VM_Version::supports_sse4_1(), ""); 2888 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2889 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2890 emit_int8(0x16); 2891 emit_int8((unsigned char)(0xC0 | encode)); 2892 emit_int8(imm8); 2893 } 2894 2895 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { 2896 assert(VM_Version::supports_sse4_1(), ""); 2897 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2898 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2899 emit_int8(0x16); 2900 emit_int8((unsigned char)(0xC0 | encode)); 2901 emit_int8(imm8); 2902 } 2903 2904 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { 2905 assert(VM_Version::supports_sse4_1(), ""); 2906 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2907 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2908 emit_int8(0x22); 2909 emit_int8((unsigned char)(0xC0 | encode)); 2910 emit_int8(imm8); 2911 } 2912 2913 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { 2914 assert(VM_Version::supports_sse4_1(), ""); 2915 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A, 2916 false, AVX_128bit, (VM_Version::supports_avx512dq() == false)); 2917 emit_int8(0x22); 2918 emit_int8((unsigned char)(0xC0 | encode)); 2919 emit_int8(imm8); 2920 } 2921 2922 void Assembler::pmovzxbw(XMMRegister dst, Address src) { 2923 assert(VM_Version::supports_sse4_1(), ""); 2924 if (VM_Version::supports_evex()) { 2925 tuple_type = EVEX_HVM; 2926 } 2927 InstructionMark im(this); 2928 simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38); 2929 emit_int8(0x30); 2930 emit_operand(dst, src); 2931 } 2932 2933 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 2934 assert(VM_Version::supports_sse4_1(), ""); 2935 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38); 2936 emit_int8(0x30); 2937 emit_int8((unsigned char)(0xC0 | encode)); 2938 } 2939 2940 // generic 2941 void Assembler::pop(Register dst) { 2942 int encode = prefix_and_encode(dst->encoding()); 2943 emit_int8(0x58 | encode); 2944 } 2945 2946 void Assembler::popcntl(Register dst, Address src) { 2947 assert(VM_Version::supports_popcnt(), "must support"); 2948 InstructionMark im(this); 2949 emit_int8((unsigned char)0xF3); 2950 prefix(src, dst); 2951 emit_int8(0x0F); 2952 emit_int8((unsigned char)0xB8); 2953 emit_operand(dst, src); 
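// The mandatory 0xF3 prefix above is emitted before prefix(src, dst) because any REX
// prefix has to sit immediately in front of the 0x0F 0xB8 opcode bytes (F3 [REX] 0F B8 /r);
// the register form that follows uses the same ordering.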
2954 } 2955 2956 void Assembler::popcntl(Register dst, Register src) { 2957 assert(VM_Version::supports_popcnt(), "must support"); 2958 emit_int8((unsigned char)0xF3); 2959 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2960 emit_int8(0x0F); 2961 emit_int8((unsigned char)0xB8); 2962 emit_int8((unsigned char)(0xC0 | encode)); 2963 } 2964 2965 void Assembler::popf() { 2966 emit_int8((unsigned char)0x9D); 2967 } 2968 2969 #ifndef _LP64 // no 32bit push/pop on amd64 2970 void Assembler::popl(Address dst) { 2971 // NOTE: this will adjust stack by 8byte on 64bits 2972 InstructionMark im(this); 2973 prefix(dst); 2974 emit_int8((unsigned char)0x8F); 2975 emit_operand(rax, dst); 2976 } 2977 #endif 2978 2979 void Assembler::prefetch_prefix(Address src) { 2980 prefix(src); 2981 emit_int8(0x0F); 2982 } 2983 2984 void Assembler::prefetchnta(Address src) { 2985 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 2986 InstructionMark im(this); 2987 prefetch_prefix(src); 2988 emit_int8(0x18); 2989 emit_operand(rax, src); // 0, src 2990 } 2991 2992 void Assembler::prefetchr(Address src) { 2993 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 2994 InstructionMark im(this); 2995 prefetch_prefix(src); 2996 emit_int8(0x0D); 2997 emit_operand(rax, src); // 0, src 2998 } 2999 3000 void Assembler::prefetcht0(Address src) { 3001 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3002 InstructionMark im(this); 3003 prefetch_prefix(src); 3004 emit_int8(0x18); 3005 emit_operand(rcx, src); // 1, src 3006 } 3007 3008 void Assembler::prefetcht1(Address src) { 3009 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3010 InstructionMark im(this); 3011 prefetch_prefix(src); 3012 emit_int8(0x18); 3013 emit_operand(rdx, src); // 2, src 3014 } 3015 3016 void Assembler::prefetcht2(Address src) { 3017 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3018 InstructionMark im(this); 3019 prefetch_prefix(src); 3020 emit_int8(0x18); 3021 emit_operand(rbx, src); // 3, src 3022 } 3023 3024 void Assembler::prefetchw(Address src) { 3025 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 3026 InstructionMark im(this); 3027 prefetch_prefix(src); 3028 emit_int8(0x0D); 3029 emit_operand(rcx, src); // 1, src 3030 } 3031 3032 void Assembler::prefix(Prefix p) { 3033 emit_int8(p); 3034 } 3035 3036 void Assembler::pshufb(XMMRegister dst, XMMRegister src) { 3037 assert(VM_Version::supports_ssse3(), ""); 3038 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38, 3039 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3040 emit_int8(0x00); 3041 emit_int8((unsigned char)(0xC0 | encode)); 3042 } 3043 3044 void Assembler::pshufb(XMMRegister dst, Address src) { 3045 assert(VM_Version::supports_ssse3(), ""); 3046 if (VM_Version::supports_evex()) { 3047 tuple_type = EVEX_FVM; 3048 } 3049 InstructionMark im(this); 3050 simd_prefix(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38, 3051 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3052 emit_int8(0x00); 3053 emit_operand(dst, src); 3054 } 3055 3056 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { 3057 assert(isByte(mode), "invalid value"); 3058 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3059 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66); 3060 emit_int8(mode & 0xFF); 3061 3062 } 3063 3064 void Assembler::pshufd(XMMRegister dst, Address src, int mode) { 3065 assert(isByte(mode), "invalid value"); 3066 
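// For reference: each 2-bit field of the mode byte selects which source dword is copied
// into the corresponding destination dword. For example, mode 0xE4 (0b11'10'01'00) is the
// identity shuffle and mode 0x1B (0b00'01'10'11) reverses the four dwords.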
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3067 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3068 if (VM_Version::supports_evex()) { 3069 tuple_type = EVEX_FV; 3070 input_size_in_bits = EVEX_32bit; 3071 } 3072 InstructionMark im(this); 3073 simd_prefix(dst, src, VEX_SIMD_66, false); 3074 emit_int8(0x70); 3075 emit_operand(dst, src); 3076 emit_int8(mode & 0xFF); 3077 } 3078 3079 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3080 assert(isByte(mode), "invalid value"); 3081 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3082 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2, false, 3083 (VM_Version::supports_avx512bw() == false)); 3084 emit_int8(mode & 0xFF); 3085 } 3086 3087 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { 3088 assert(isByte(mode), "invalid value"); 3089 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3090 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3091 if (VM_Version::supports_evex()) { 3092 tuple_type = EVEX_FVM; 3093 } 3094 InstructionMark im(this); 3095 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, false, VEX_OPCODE_0F, 3096 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3097 emit_int8(0x70); 3098 emit_operand(dst, src); 3099 emit_int8(mode & 0xFF); 3100 } 3101 3102 void Assembler::psrldq(XMMRegister dst, int shift) { 3103 // Shift right 128 bit value in xmm register by number of bytes. 3104 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3105 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3106 emit_int8(0x73); 3107 emit_int8((unsigned char)(0xC0 | encode)); 3108 emit_int8(shift); 3109 } 3110 3111 void Assembler::pslldq(XMMRegister dst, int shift) { 3112 // Shift left 128 bit value in xmm register by number of bytes.
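// In psrldq above and pslldq here, the first XMMRegister handed to simd_prefix_and_encode
// (xmm3 / xmm7) is not a real operand: its encoding supplies the ModRM reg field, i.e. the
// /3 (PSRLDQ) and /7 (PSLLDQ) opcode extensions of the 66 0F 73 /r ib shift group.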
3113 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3114 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 3115 emit_int8(0x73); 3116 emit_int8((unsigned char)(0xC0 | encode)); 3117 emit_int8(shift); 3118 } 3119 3120 void Assembler::ptest(XMMRegister dst, Address src) { 3121 assert(VM_Version::supports_sse4_1(), ""); 3122 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3123 InstructionMark im(this); 3124 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false, 3125 VEX_OPCODE_0F_38, false, AVX_128bit, true); 3126 emit_int8(0x17); 3127 emit_operand(dst, src); 3128 } 3129 3130 void Assembler::ptest(XMMRegister dst, XMMRegister src) { 3131 assert(VM_Version::supports_sse4_1(), ""); 3132 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, 3133 VEX_OPCODE_0F_38, false, AVX_128bit, true); 3134 emit_int8(0x17); 3135 emit_int8((unsigned char)(0xC0 | encode)); 3136 } 3137 3138 void Assembler::vptest(XMMRegister dst, Address src) { 3139 assert(VM_Version::supports_avx(), ""); 3140 InstructionMark im(this); 3141 int vector_len = AVX_256bit; 3142 assert(dst != xnoreg, "sanity"); 3143 int dst_enc = dst->encoding(); 3144 // swap src<->dst for encoding 3145 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len, true, false); 3146 emit_int8(0x17); 3147 emit_operand(dst, src); 3148 } 3149 3150 void Assembler::vptest(XMMRegister dst, XMMRegister src) { 3151 assert(VM_Version::supports_avx(), ""); 3152 int vector_len = AVX_256bit; 3153 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 3154 vector_len, VEX_OPCODE_0F_38, true, false); 3155 emit_int8(0x17); 3156 emit_int8((unsigned char)(0xC0 | encode)); 3157 } 3158 3159 void Assembler::punpcklbw(XMMRegister dst, Address src) { 3160 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3161 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3162 if (VM_Version::supports_evex()) { 3163 tuple_type = EVEX_FVM; 3164 } 3165 emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false)); 3166 } 3167 3168 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3169 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3170 emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false)); 3171 } 3172 3173 void Assembler::punpckldq(XMMRegister dst, Address src) { 3174 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3175 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3176 if (VM_Version::supports_evex()) { 3177 tuple_type = EVEX_FV; 3178 input_size_in_bits = EVEX_32bit; 3179 } 3180 emit_simd_arith(0x62, dst, src, VEX_SIMD_66); 3181 } 3182 3183 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { 3184 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3185 emit_simd_arith(0x62, dst, src, VEX_SIMD_66); 3186 } 3187 3188 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { 3189 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3190 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66); 3191 } 3192 3193 void Assembler::push(int32_t imm32) { 3194 // in 64bits we push 64bits onto the stack but only 3195 // take a 32bit immediate 3196 emit_int8(0x68); 3197 emit_int32(imm32); 3198 } 3199 3200 void Assembler::push(Register src) { 3201 int encode = prefix_and_encode(src->encoding()); 3202 3203 emit_int8(0x50 | encode); 3204 } 3205 3206 void Assembler::pushf() { 3207 
emit_int8((unsigned char)0x9C); 3208 } 3209 3210 #ifndef _LP64 // no 32bit push/pop on amd64 3211 void Assembler::pushl(Address src) { 3212 // Note this will push 64bit on 64bit 3213 InstructionMark im(this); 3214 prefix(src); 3215 emit_int8((unsigned char)0xFF); 3216 emit_operand(rsi, src); 3217 } 3218 #endif 3219 3220 void Assembler::rcll(Register dst, int imm8) { 3221 assert(isShiftCount(imm8), "illegal shift count"); 3222 int encode = prefix_and_encode(dst->encoding()); 3223 if (imm8 == 1) { 3224 emit_int8((unsigned char)0xD1); 3225 emit_int8((unsigned char)(0xD0 | encode)); 3226 } else { 3227 emit_int8((unsigned char)0xC1); 3228 emit_int8((unsigned char)0xD0 | encode); 3229 emit_int8(imm8); 3230 } 3231 } 3232 3233 void Assembler::rdtsc() { 3234 emit_int8((unsigned char)0x0F); 3235 emit_int8((unsigned char)0x31); 3236 } 3237 3238 // copies data from [esi] to [edi] using rcx pointer sized words 3239 // generic 3240 void Assembler::rep_mov() { 3241 emit_int8((unsigned char)0xF3); 3242 // MOVSQ 3243 LP64_ONLY(prefix(REX_W)); 3244 emit_int8((unsigned char)0xA5); 3245 } 3246 3247 // sets rcx bytes with rax, value at [edi] 3248 void Assembler::rep_stosb() { 3249 emit_int8((unsigned char)0xF3); // REP 3250 LP64_ONLY(prefix(REX_W)); 3251 emit_int8((unsigned char)0xAA); // STOSB 3252 } 3253 3254 // sets rcx pointer sized words with rax, value at [edi] 3255 // generic 3256 void Assembler::rep_stos() { 3257 emit_int8((unsigned char)0xF3); // REP 3258 LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD 3259 emit_int8((unsigned char)0xAB); 3260 } 3261 3262 // scans rcx pointer sized words at [edi] for occurance of rax, 3263 // generic 3264 void Assembler::repne_scan() { // repne_scan 3265 emit_int8((unsigned char)0xF2); 3266 // SCASQ 3267 LP64_ONLY(prefix(REX_W)); 3268 emit_int8((unsigned char)0xAF); 3269 } 3270 3271 #ifdef _LP64 3272 // scans rcx 4 byte words at [edi] for occurance of rax, 3273 // generic 3274 void Assembler::repne_scanl() { // repne_scan 3275 emit_int8((unsigned char)0xF2); 3276 // SCASL 3277 emit_int8((unsigned char)0xAF); 3278 } 3279 #endif 3280 3281 void Assembler::ret(int imm16) { 3282 if (imm16 == 0) { 3283 emit_int8((unsigned char)0xC3); 3284 } else { 3285 emit_int8((unsigned char)0xC2); 3286 emit_int16(imm16); 3287 } 3288 } 3289 3290 void Assembler::sahf() { 3291 #ifdef _LP64 3292 // Not supported in 64bit mode 3293 ShouldNotReachHere(); 3294 #endif 3295 emit_int8((unsigned char)0x9E); 3296 } 3297 3298 void Assembler::sarl(Register dst, int imm8) { 3299 int encode = prefix_and_encode(dst->encoding()); 3300 assert(isShiftCount(imm8), "illegal shift count"); 3301 if (imm8 == 1) { 3302 emit_int8((unsigned char)0xD1); 3303 emit_int8((unsigned char)(0xF8 | encode)); 3304 } else { 3305 emit_int8((unsigned char)0xC1); 3306 emit_int8((unsigned char)(0xF8 | encode)); 3307 emit_int8(imm8); 3308 } 3309 } 3310 3311 void Assembler::sarl(Register dst) { 3312 int encode = prefix_and_encode(dst->encoding()); 3313 emit_int8((unsigned char)0xD3); 3314 emit_int8((unsigned char)(0xF8 | encode)); 3315 } 3316 3317 void Assembler::sbbl(Address dst, int32_t imm32) { 3318 InstructionMark im(this); 3319 prefix(dst); 3320 emit_arith_operand(0x81, rbx, dst, imm32); 3321 } 3322 3323 void Assembler::sbbl(Register dst, int32_t imm32) { 3324 prefix(dst); 3325 emit_arith(0x81, 0xD8, dst, imm32); 3326 } 3327 3328 3329 void Assembler::sbbl(Register dst, Address src) { 3330 InstructionMark im(this); 3331 prefix(src, dst); 3332 emit_int8(0x1B); 3333 emit_operand(dst, src); 3334 } 3335 3336 void 
Assembler::sbbl(Register dst, Register src) { 3337 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3338 emit_arith(0x1B, 0xC0, dst, src); 3339 } 3340 3341 void Assembler::setb(Condition cc, Register dst) { 3342 assert(0 <= cc && cc < 16, "illegal cc"); 3343 int encode = prefix_and_encode(dst->encoding(), true); 3344 emit_int8(0x0F); 3345 emit_int8((unsigned char)0x90 | cc); 3346 emit_int8((unsigned char)(0xC0 | encode)); 3347 } 3348 3349 void Assembler::shll(Register dst, int imm8) { 3350 assert(isShiftCount(imm8), "illegal shift count"); 3351 int encode = prefix_and_encode(dst->encoding()); 3352 if (imm8 == 1 ) { 3353 emit_int8((unsigned char)0xD1); 3354 emit_int8((unsigned char)(0xE0 | encode)); 3355 } else { 3356 emit_int8((unsigned char)0xC1); 3357 emit_int8((unsigned char)(0xE0 | encode)); 3358 emit_int8(imm8); 3359 } 3360 } 3361 3362 void Assembler::shll(Register dst) { 3363 int encode = prefix_and_encode(dst->encoding()); 3364 emit_int8((unsigned char)0xD3); 3365 emit_int8((unsigned char)(0xE0 | encode)); 3366 } 3367 3368 void Assembler::shrl(Register dst, int imm8) { 3369 assert(isShiftCount(imm8), "illegal shift count"); 3370 int encode = prefix_and_encode(dst->encoding()); 3371 emit_int8((unsigned char)0xC1); 3372 emit_int8((unsigned char)(0xE8 | encode)); 3373 emit_int8(imm8); 3374 } 3375 3376 void Assembler::shrl(Register dst) { 3377 int encode = prefix_and_encode(dst->encoding()); 3378 emit_int8((unsigned char)0xD3); 3379 emit_int8((unsigned char)(0xE8 | encode)); 3380 } 3381 3382 // copies a single word from [esi] to [edi] 3383 void Assembler::smovl() { 3384 emit_int8((unsigned char)0xA5); 3385 } 3386 3387 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { 3388 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3389 if (VM_Version::supports_evex()) { 3390 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3391 } else { 3392 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3393 } 3394 } 3395 3396 void Assembler::sqrtsd(XMMRegister dst, Address src) { 3397 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3398 if (VM_Version::supports_evex()) { 3399 tuple_type = EVEX_T1S; 3400 input_size_in_bits = EVEX_64bit; 3401 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3402 } else { 3403 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3404 } 3405 } 3406 3407 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 3408 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3409 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3410 } 3411 3412 void Assembler::std() { 3413 emit_int8((unsigned char)0xFD); 3414 } 3415 3416 void Assembler::sqrtss(XMMRegister dst, Address src) { 3417 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3418 if (VM_Version::supports_evex()) { 3419 tuple_type = EVEX_T1S; 3420 input_size_in_bits = EVEX_32bit; 3421 } 3422 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3423 } 3424 3425 void Assembler::stmxcsr( Address dst) { 3426 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3427 InstructionMark im(this); 3428 prefix(dst); 3429 emit_int8(0x0F); 3430 emit_int8((unsigned char)0xAE); 3431 emit_operand(as_Register(3), dst); 3432 } 3433 3434 void Assembler::subl(Address dst, int32_t imm32) { 3435 InstructionMark im(this); 3436 prefix(dst); 3437 emit_arith_operand(0x81, rbp, dst, imm32); 3438 } 3439 3440 void Assembler::subl(Address dst, Register src) { 3441 InstructionMark im(this); 3442 prefix(dst, src); 3443 emit_int8(0x29); 3444 emit_operand(src, dst); 3445 } 3446 3447 void Assembler::subl(Register dst, int32_t imm32) { 3448 prefix(dst); 3449 
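// The 0xE8 passed to emit_arith below is the ModRM template 0b11'101'000: mod=11 with the
// /5 (SUB) opcode extension; the destination register is OR'ed into the low three bits.
// emit_arith also picks the sign-extended 0x83 /5 ib form when imm32 fits in a byte, which
// is why subl_imm32 below exists to force the full 0x81 /5 id encoding.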
emit_arith(0x81, 0xE8, dst, imm32); 3450 } 3451 3452 // Force generation of a 4 byte immediate value even if it fits into 8bit 3453 void Assembler::subl_imm32(Register dst, int32_t imm32) { 3454 prefix(dst); 3455 emit_arith_imm32(0x81, 0xE8, dst, imm32); 3456 } 3457 3458 void Assembler::subl(Register dst, Address src) { 3459 InstructionMark im(this); 3460 prefix(src, dst); 3461 emit_int8(0x2B); 3462 emit_operand(dst, src); 3463 } 3464 3465 void Assembler::subl(Register dst, Register src) { 3466 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3467 emit_arith(0x2B, 0xC0, dst, src); 3468 } 3469 3470 void Assembler::subsd(XMMRegister dst, XMMRegister src) { 3471 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3472 if (VM_Version::supports_evex()) { 3473 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3474 } else { 3475 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2); 3476 } 3477 } 3478 3479 void Assembler::subsd(XMMRegister dst, Address src) { 3480 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3481 if (VM_Version::supports_evex()) { 3482 tuple_type = EVEX_T1S; 3483 input_size_in_bits = EVEX_64bit; 3484 } 3485 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3486 } 3487 3488 void Assembler::subss(XMMRegister dst, XMMRegister src) { 3489 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3490 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3491 } 3492 3493 void Assembler::subss(XMMRegister dst, Address src) { 3494 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3495 if (VM_Version::supports_evex()) { 3496 tuple_type = EVEX_T1S; 3497 input_size_in_bits = EVEX_32bit; 3498 } 3499 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3500 } 3501 3502 void Assembler::testb(Register dst, int imm8) { 3503 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 3504 (void) prefix_and_encode(dst->encoding(), true); 3505 emit_arith_b(0xF6, 0xC0, dst, imm8); 3506 } 3507 3508 void Assembler::testl(Register dst, int32_t imm32) { 3509 // not using emit_arith because test 3510 // doesn't support sign-extension of 3511 // 8bit operands 3512 int encode = dst->encoding(); 3513 if (encode == 0) { 3514 emit_int8((unsigned char)0xA9); 3515 } else { 3516 encode = prefix_and_encode(encode); 3517 emit_int8((unsigned char)0xF7); 3518 emit_int8((unsigned char)(0xC0 | encode)); 3519 } 3520 emit_int32(imm32); 3521 } 3522 3523 void Assembler::testl(Register dst, Register src) { 3524 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3525 emit_arith(0x85, 0xC0, dst, src); 3526 } 3527 3528 void Assembler::testl(Register dst, Address src) { 3529 InstructionMark im(this); 3530 prefix(src, dst); 3531 emit_int8((unsigned char)0x85); 3532 emit_operand(dst, src); 3533 } 3534 3535 void Assembler::tzcntl(Register dst, Register src) { 3536 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3537 emit_int8((unsigned char)0xF3); 3538 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3539 emit_int8(0x0F); 3540 emit_int8((unsigned char)0xBC); 3541 emit_int8((unsigned char)0xC0 | encode); 3542 } 3543 3544 void Assembler::tzcntq(Register dst, Register src) { 3545 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3546 emit_int8((unsigned char)0xF3); 3547 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 3548 emit_int8(0x0F); 3549 emit_int8((unsigned char)0xBC); 3550 emit_int8((unsigned char)(0xC0 | encode)); 3551 } 3552 3553 void Assembler::ucomisd(XMMRegister dst, Address src) { 3554 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 
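// As in the other EVEX-aware memory forms, tuple_type and input_size_in_bits are recorded
// before the prefix is emitted so the EVEX path can apply compressed-displacement (disp8*N)
// scaling; Tuple1-Scalar with a 64-bit input corresponds to N = 8.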
3555 if (VM_Version::supports_evex()) { 3556 tuple_type = EVEX_T1S; 3557 input_size_in_bits = EVEX_64bit; 3558 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3559 } else { 3560 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3561 } 3562 } 3563 3564 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 3565 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3566 if (VM_Version::supports_evex()) { 3567 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3568 } else { 3569 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3570 } 3571 } 3572 3573 void Assembler::ucomiss(XMMRegister dst, Address src) { 3574 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3575 if (VM_Version::supports_evex()) { 3576 tuple_type = EVEX_T1S; 3577 input_size_in_bits = EVEX_32bit; 3578 } 3579 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3580 } 3581 3582 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { 3583 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3584 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3585 } 3586 3587 void Assembler::xabort(int8_t imm8) { 3588 emit_int8((unsigned char)0xC6); 3589 emit_int8((unsigned char)0xF8); 3590 emit_int8((unsigned char)(imm8 & 0xFF)); 3591 } 3592 3593 void Assembler::xaddl(Address dst, Register src) { 3594 InstructionMark im(this); 3595 prefix(dst, src); 3596 emit_int8(0x0F); 3597 emit_int8((unsigned char)0xC1); 3598 emit_operand(src, dst); 3599 } 3600 3601 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { 3602 InstructionMark im(this); 3603 relocate(rtype); 3604 if (abort.is_bound()) { 3605 address entry = target(abort); 3606 assert(entry != NULL, "abort entry NULL"); 3607 intptr_t offset = entry - pc(); 3608 emit_int8((unsigned char)0xC7); 3609 emit_int8((unsigned char)0xF8); 3610 emit_int32(offset - 6); // 2 opcode + 4 address 3611 } else { 3612 abort.add_patch_at(code(), locator()); 3613 emit_int8((unsigned char)0xC7); 3614 emit_int8((unsigned char)0xF8); 3615 emit_int32(0); 3616 } 3617 } 3618 3619 void Assembler::xchgl(Register dst, Address src) { // xchg 3620 InstructionMark im(this); 3621 prefix(src, dst); 3622 emit_int8((unsigned char)0x87); 3623 emit_operand(dst, src); 3624 } 3625 3626 void Assembler::xchgl(Register dst, Register src) { 3627 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3628 emit_int8((unsigned char)0x87); 3629 emit_int8((unsigned char)(0xC0 | encode)); 3630 } 3631 3632 void Assembler::xend() { 3633 emit_int8((unsigned char)0x0F); 3634 emit_int8((unsigned char)0x01); 3635 emit_int8((unsigned char)0xD5); 3636 } 3637 3638 void Assembler::xgetbv() { 3639 emit_int8(0x0F); 3640 emit_int8(0x01); 3641 emit_int8((unsigned char)0xD0); 3642 } 3643 3644 void Assembler::xorl(Register dst, int32_t imm32) { 3645 prefix(dst); 3646 emit_arith(0x81, 0xF0, dst, imm32); 3647 } 3648 3649 void Assembler::xorl(Register dst, Address src) { 3650 InstructionMark im(this); 3651 prefix(src, dst); 3652 emit_int8(0x33); 3653 emit_operand(dst, src); 3654 } 3655 3656 void Assembler::xorl(Register dst, Register src) { 3657 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3658 emit_arith(0x33, 0xC0, dst, src); 3659 } 3660 3661 3662 // AVX 3-operands scalar float-point arithmetic instructions 3663 3664 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) { 3665 assert(VM_Version::supports_avx(), ""); 3666 if (VM_Version::supports_evex()) { 3667 tuple_type = EVEX_T1S; 3668 input_size_in_bits = EVEX_64bit; 3669 emit_vex_arith_q(0x58, dst, nds, 
src, VEX_SIMD_F2, AVX_128bit); 3670 } else { 3671 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3672 } 3673 } 3674 3675 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3676 assert(VM_Version::supports_avx(), ""); 3677 if (VM_Version::supports_evex()) { 3678 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3679 } else { 3680 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3681 } 3682 } 3683 3684 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { 3685 assert(VM_Version::supports_avx(), ""); 3686 if (VM_Version::supports_evex()) { 3687 tuple_type = EVEX_T1S; 3688 input_size_in_bits = EVEX_32bit; 3689 } 3690 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3691 } 3692 3693 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3694 assert(VM_Version::supports_avx(), ""); 3695 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3696 } 3697 3698 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { 3699 assert(VM_Version::supports_avx(), ""); 3700 if (VM_Version::supports_evex()) { 3701 tuple_type = EVEX_T1S; 3702 input_size_in_bits = EVEX_64bit; 3703 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3704 } else { 3705 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3706 } 3707 } 3708 3709 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3710 assert(VM_Version::supports_avx(), ""); 3711 if (VM_Version::supports_evex()) { 3712 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3713 } else { 3714 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3715 } 3716 } 3717 3718 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { 3719 assert(VM_Version::supports_avx(), ""); 3720 if (VM_Version::supports_evex()) { 3721 tuple_type = EVEX_T1S; 3722 input_size_in_bits = EVEX_32bit; 3723 } 3724 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3725 } 3726 3727 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3728 assert(VM_Version::supports_avx(), ""); 3729 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3730 } 3731 3732 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { 3733 assert(VM_Version::supports_avx(), ""); 3734 if (VM_Version::supports_evex()) { 3735 tuple_type = EVEX_T1S; 3736 input_size_in_bits = EVEX_64bit; 3737 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3738 } else { 3739 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3740 } 3741 } 3742 3743 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3744 assert(VM_Version::supports_avx(), ""); 3745 if (VM_Version::supports_evex()) { 3746 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3747 } else { 3748 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3749 } 3750 } 3751 3752 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { 3753 assert(VM_Version::supports_avx(), ""); 3754 if (VM_Version::supports_evex()) { 3755 tuple_type = EVEX_T1S; 3756 input_size_in_bits = EVEX_32bit; 3757 } 3758 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3759 } 3760 3761 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3762 assert(VM_Version::supports_avx(), ""); 3763 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3764 } 3765 3766 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, 
Address src) { 3767 assert(VM_Version::supports_avx(), ""); 3768 if (VM_Version::supports_evex()) { 3769 tuple_type = EVEX_T1S; 3770 input_size_in_bits = EVEX_64bit; 3771 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3772 } else { 3773 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3774 } 3775 } 3776 3777 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3778 assert(VM_Version::supports_avx(), ""); 3779 if (VM_Version::supports_evex()) { 3780 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3781 } else { 3782 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3783 } 3784 } 3785 3786 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { 3787 assert(VM_Version::supports_avx(), ""); 3788 if (VM_Version::supports_evex()) { 3789 tuple_type = EVEX_T1S; 3790 input_size_in_bits = EVEX_32bit; 3791 } 3792 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3793 } 3794 3795 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3796 assert(VM_Version::supports_avx(), ""); 3797 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3798 } 3799 3800 //====================VECTOR ARITHMETIC===================================== 3801 3802 // Float-point vector arithmetic 3803 3804 void Assembler::addpd(XMMRegister dst, XMMRegister src) { 3805 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3806 if (VM_Version::supports_evex()) { 3807 emit_simd_arith_q(0x58, dst, src, VEX_SIMD_66); 3808 } else { 3809 emit_simd_arith(0x58, dst, src, VEX_SIMD_66); 3810 } 3811 } 3812 3813 void Assembler::addps(XMMRegister dst, XMMRegister src) { 3814 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3815 emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE); 3816 } 3817 3818 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3819 assert(VM_Version::supports_avx(), ""); 3820 if (VM_Version::supports_evex()) { 3821 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3822 } else { 3823 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3824 } 3825 } 3826 3827 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3828 assert(VM_Version::supports_avx(), ""); 3829 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len); 3830 } 3831 3832 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3833 assert(VM_Version::supports_avx(), ""); 3834 if (VM_Version::supports_evex()) { 3835 tuple_type = EVEX_FV; 3836 input_size_in_bits = EVEX_64bit; 3837 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3838 } else { 3839 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len); 3840 } 3841 } 3842 3843 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3844 assert(VM_Version::supports_avx(), ""); 3845 if (VM_Version::supports_evex()) { 3846 tuple_type = EVEX_FV; 3847 input_size_in_bits = EVEX_32bit; 3848 } 3849 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len); 3850 } 3851 3852 void Assembler::subpd(XMMRegister dst, XMMRegister src) { 3853 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3854 if (VM_Version::supports_evex()) { 3855 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_66); 3856 } else { 3857 emit_simd_arith(0x5C, dst, src, VEX_SIMD_66); 3858 } 3859 } 3860 3861 void Assembler::subps(XMMRegister dst, XMMRegister src) { 3862 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3863 
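  // SUBPS is encoded as 0F 5C with no SIMD prefix, hence VEX_SIMD_NONE below.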
emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE); 3864 } 3865 3866 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3867 assert(VM_Version::supports_avx(), ""); 3868 if (VM_Version::supports_evex()) { 3869 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3870 } else { 3871 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3872 } 3873 } 3874 3875 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3876 assert(VM_Version::supports_avx(), ""); 3877 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3878 } 3879 3880 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3881 assert(VM_Version::supports_avx(), ""); 3882 if (VM_Version::supports_evex()) { 3883 tuple_type = EVEX_FV; 3884 input_size_in_bits = EVEX_64bit; 3885 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3886 } else { 3887 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3888 } 3889 } 3890 3891 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3892 assert(VM_Version::supports_avx(), ""); 3893 if (VM_Version::supports_evex()) { 3894 tuple_type = EVEX_FV; 3895 input_size_in_bits = EVEX_32bit; 3896 } 3897 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3898 } 3899 3900 void Assembler::mulpd(XMMRegister dst, XMMRegister src) { 3901 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3902 if (VM_Version::supports_evex()) { 3903 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66); 3904 } else { 3905 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); 3906 } 3907 } 3908 3909 void Assembler::mulps(XMMRegister dst, XMMRegister src) { 3910 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3911 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE); 3912 } 3913 3914 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3915 assert(VM_Version::supports_avx(), ""); 3916 if (VM_Version::supports_evex()) { 3917 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3918 } else { 3919 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3920 } 3921 } 3922 3923 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3924 assert(VM_Version::supports_avx(), ""); 3925 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len); 3926 } 3927 3928 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3929 assert(VM_Version::supports_avx(), ""); 3930 if (VM_Version::supports_evex()) { 3931 tuple_type = EVEX_FV; 3932 input_size_in_bits = EVEX_64bit; 3933 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3934 } else { 3935 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3936 } 3937 } 3938 3939 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3940 assert(VM_Version::supports_avx(), ""); 3941 if (VM_Version::supports_evex()) { 3942 tuple_type = EVEX_FV; 3943 input_size_in_bits = EVEX_32bit; 3944 } 3945 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len); 3946 } 3947 3948 void Assembler::divpd(XMMRegister dst, XMMRegister src) { 3949 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3950 if (VM_Version::supports_evex()) { 3951 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_66); 3952 } else { 3953 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66); 3954 } 3955 } 3956 3957 void Assembler::divps(XMMRegister dst, XMMRegister src) { 3958 
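  // DIVPS is encoded as 0F 5E with no SIMD prefix (packed single-precision divide).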
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3959 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE); 3960 } 3961 3962 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3963 assert(VM_Version::supports_avx(), ""); 3964 if (VM_Version::supports_evex()) { 3965 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3966 } else { 3967 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3968 } 3969 } 3970 3971 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3972 assert(VM_Version::supports_avx(), ""); 3973 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 3974 } 3975 3976 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3977 assert(VM_Version::supports_avx(), ""); 3978 if (VM_Version::supports_evex()) { 3979 tuple_type = EVEX_FV; 3980 input_size_in_bits = EVEX_64bit; 3981 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3982 } else { 3983 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3984 } 3985 } 3986 3987 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3988 assert(VM_Version::supports_avx(), ""); 3989 if (VM_Version::supports_evex()) { 3990 tuple_type = EVEX_FV; 3991 input_size_in_bits = EVEX_32bit; 3992 } 3993 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 3994 } 3995 3996 void Assembler::andpd(XMMRegister dst, XMMRegister src) { 3997 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3998 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 3999 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 4000 } else { 4001 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 4002 } 4003 } 4004 4005 void Assembler::andps(XMMRegister dst, XMMRegister src) { 4006 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4007 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, false, 4008 (VM_Version::supports_avx512dq() == false)); 4009 } 4010 4011 void Assembler::andps(XMMRegister dst, Address src) { 4012 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4013 if (VM_Version::supports_evex()) { 4014 tuple_type = EVEX_FV; 4015 input_size_in_bits = EVEX_32bit; 4016 } 4017 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, 4018 false, (VM_Version::supports_avx512dq() == false)); 4019 } 4020 4021 void Assembler::andpd(XMMRegister dst, Address src) { 4022 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4023 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4024 tuple_type = EVEX_FV; 4025 input_size_in_bits = EVEX_64bit; 4026 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 4027 } else { 4028 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 4029 } 4030 } 4031 4032 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4033 assert(VM_Version::supports_avx(), ""); 4034 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4035 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4036 } else { 4037 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4038 } 4039 } 4040 4041 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4042 assert(VM_Version::supports_avx(), ""); 4043 bool legacy_mode = (VM_Version::supports_avx512dq() == false); 4044 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, legacy_mode); 4045 } 4046 4047 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) 
{ 4048 assert(VM_Version::supports_avx(), ""); 4049 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4050 tuple_type = EVEX_FV; 4051 input_size_in_bits = EVEX_64bit; 4052 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4053 } else { 4054 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4055 } 4056 } 4057 4058 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4059 assert(VM_Version::supports_avx(), ""); 4060 if (VM_Version::supports_evex()) { 4061 tuple_type = EVEX_FV; 4062 input_size_in_bits = EVEX_32bit; 4063 } 4064 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, 4065 (VM_Version::supports_avx512dq() == false)); 4066 } 4067 4068 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { 4069 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4070 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4071 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4072 } else { 4073 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4074 } 4075 } 4076 4077 void Assembler::xorps(XMMRegister dst, XMMRegister src) { 4078 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4079 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, 4080 false, (VM_Version::supports_avx512dq() == false)); 4081 } 4082 4083 void Assembler::xorpd(XMMRegister dst, Address src) { 4084 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4085 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4086 tuple_type = EVEX_FV; 4087 input_size_in_bits = EVEX_64bit; 4088 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4089 } else { 4090 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4091 } 4092 } 4093 4094 void Assembler::xorps(XMMRegister dst, Address src) { 4095 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4096 if (VM_Version::supports_evex()) { 4097 tuple_type = EVEX_FV; 4098 input_size_in_bits = EVEX_32bit; 4099 } 4100 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, false, 4101 (VM_Version::supports_avx512dq() == false)); 4102 } 4103 4104 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4105 assert(VM_Version::supports_avx(), ""); 4106 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4107 emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len); 4108 } else { 4109 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector_len, true); 4110 } 4111 } 4112 4113 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4114 assert(VM_Version::supports_avx(), ""); 4115 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector_len, 4116 (VM_Version::supports_avx512dq() == false)); 4117 } 4118 4119 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4120 assert(VM_Version::supports_avx(), ""); 4121 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4122 tuple_type = EVEX_FV; 4123 input_size_in_bits = EVEX_64bit; 4124 emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len); 4125 } else { 4126 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector_len, true); 4127 } 4128 } 4129 4130 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4131 assert(VM_Version::supports_avx(), ""); 4132 if (VM_Version::supports_evex()) { 4133 tuple_type = EVEX_FV; 4134 input_size_in_bits = EVEX_32bit; 4135 } 4136 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector_len, 4137 (VM_Version::supports_avx512dq() == 
false)); 4138 } 4139 4140 // Integer vector arithmetic 4141 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4142 assert(VM_Version::supports_avx() && (vector_len == 0) || 4143 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 4144 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, 4145 VEX_OPCODE_0F_38, true, false); 4146 emit_int8(0x01); 4147 emit_int8((unsigned char)(0xC0 | encode)); 4148 } 4149 4150 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4151 assert(VM_Version::supports_avx() && (vector_len == 0) || 4152 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); 4153 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, 4154 VEX_OPCODE_0F_38, true, false); 4155 emit_int8(0x02); 4156 emit_int8((unsigned char)(0xC0 | encode)); 4157 } 4158 4159 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 4160 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4161 emit_simd_arith(0xFC, dst, src, VEX_SIMD_66); 4162 } 4163 4164 void Assembler::paddw(XMMRegister dst, XMMRegister src) { 4165 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4166 emit_simd_arith(0xFD, dst, src, VEX_SIMD_66); 4167 } 4168 4169 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 4170 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4171 emit_simd_arith(0xFE, dst, src, VEX_SIMD_66); 4172 } 4173 4174 void Assembler::paddq(XMMRegister dst, XMMRegister src) { 4175 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4176 if (VM_Version::supports_evex()) { 4177 emit_simd_arith_q(0xD4, dst, src, VEX_SIMD_66); 4178 } else { 4179 emit_simd_arith(0xD4, dst, src, VEX_SIMD_66); 4180 } 4181 } 4182 4183 void Assembler::phaddw(XMMRegister dst, XMMRegister src) { 4184 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 4185 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 4186 VEX_OPCODE_0F_38, false, AVX_128bit, true); 4187 emit_int8(0x01); 4188 emit_int8((unsigned char)(0xC0 | encode)); 4189 } 4190 4191 void Assembler::phaddd(XMMRegister dst, XMMRegister src) { 4192 NOT_LP64(assert(VM_Version::supports_sse3(), "")); 4193 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 4194 VEX_OPCODE_0F_38, false, AVX_128bit, true); 4195 emit_int8(0x02); 4196 emit_int8((unsigned char)(0xC0 | encode)); 4197 } 4198 4199 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4200 assert(UseAVX > 0, "requires some form of AVX"); 4201 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len, 4202 (VM_Version::supports_avx512bw() == false)); 4203 } 4204 4205 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4206 assert(UseAVX > 0, "requires some form of AVX"); 4207 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len, 4208 (VM_Version::supports_avx512bw() == false)); 4209 } 4210 4211 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4212 assert(UseAVX > 0, "requires some form of AVX"); 4213 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4214 } 4215 4216 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4217 assert(UseAVX > 0, "requires some form of AVX"); 4218 if (VM_Version::supports_evex()) { 4219 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4220 } else { 4221 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 
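    // In the VEX path 0xD4 (paddq) keeps its 64-bit lane width without needing VEX.W;
    // the EVEX path above uses the _q variant to request the W-set encoding.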
4222 } 4223 } 4224 4225 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4226 assert(UseAVX > 0, "requires some form of AVX"); 4227 if (VM_Version::supports_evex()) { 4228 tuple_type = EVEX_FVM; 4229 } 4230 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len); 4231 } 4232 4233 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4234 assert(UseAVX > 0, "requires some form of AVX"); 4235 if (VM_Version::supports_evex()) { 4236 tuple_type = EVEX_FVM; 4237 } 4238 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len); 4239 } 4240 4241 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4242 assert(UseAVX > 0, "requires some form of AVX"); 4243 if (VM_Version::supports_evex()) { 4244 tuple_type = EVEX_FV; 4245 input_size_in_bits = EVEX_32bit; 4246 } 4247 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4248 } 4249 4250 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4251 assert(UseAVX > 0, "requires some form of AVX"); 4252 if (VM_Version::supports_evex()) { 4253 tuple_type = EVEX_FV; 4254 input_size_in_bits = EVEX_64bit; 4255 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4256 } else { 4257 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4258 } 4259 } 4260 4261 void Assembler::psubb(XMMRegister dst, XMMRegister src) { 4262 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4263 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66); 4264 } 4265 4266 void Assembler::psubw(XMMRegister dst, XMMRegister src) { 4267 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4268 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66); 4269 } 4270 4271 void Assembler::psubd(XMMRegister dst, XMMRegister src) { 4272 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4273 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66); 4274 } 4275 4276 void Assembler::psubq(XMMRegister dst, XMMRegister src) { 4277 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4278 if (VM_Version::supports_evex()) { 4279 emit_simd_arith_q(0xFB, dst, src, VEX_SIMD_66); 4280 } else { 4281 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66); 4282 } 4283 } 4284 4285 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4286 assert(UseAVX > 0, "requires some form of AVX"); 4287 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector_len, 4288 (VM_Version::supports_avx512bw() == false)); 4289 } 4290 4291 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4292 assert(UseAVX > 0, "requires some form of AVX"); 4293 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4294 (VM_Version::supports_avx512bw() == false)); 4295 } 4296 4297 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4298 assert(UseAVX > 0, "requires some form of AVX"); 4299 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4300 } 4301 4302 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4303 assert(UseAVX > 0, "requires some form of AVX"); 4304 if (VM_Version::supports_evex()) { 4305 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4306 } else { 4307 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4308 } 4309 } 4310 4311 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4312 assert(UseAVX > 0, "requires some form of AVX"); 4313 if (VM_Version::supports_evex()) 
{ 4314 tuple_type = EVEX_FVM; 4315 } 4316 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector_len, 4317 (VM_Version::supports_avx512bw() == false)); 4318 } 4319 4320 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4321 assert(UseAVX > 0, "requires some form of AVX"); 4322 if (VM_Version::supports_evex()) { 4323 tuple_type = EVEX_FVM; 4324 } 4325 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4326 (VM_Version::supports_avx512bw() == false)); 4327 } 4328 4329 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4330 assert(UseAVX > 0, "requires some form of AVX"); 4331 if (VM_Version::supports_evex()) { 4332 tuple_type = EVEX_FV; 4333 input_size_in_bits = EVEX_32bit; 4334 } 4335 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4336 } 4337 4338 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4339 assert(UseAVX > 0, "requires some form of AVX"); 4340 if (VM_Version::supports_evex()) { 4341 tuple_type = EVEX_FV; 4342 input_size_in_bits = EVEX_64bit; 4343 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4344 } else { 4345 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4346 } 4347 } 4348 4349 void Assembler::pmullw(XMMRegister dst, XMMRegister src) { 4350 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4351 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66, 4352 (VM_Version::supports_avx512bw() == false)); 4353 } 4354 4355 void Assembler::pmulld(XMMRegister dst, XMMRegister src) { 4356 assert(VM_Version::supports_sse4_1(), ""); 4357 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, 4358 false, VEX_OPCODE_0F_38); 4359 emit_int8(0x40); 4360 emit_int8((unsigned char)(0xC0 | encode)); 4361 } 4362 4363 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4364 assert(UseAVX > 0, "requires some form of AVX"); 4365 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len, 4366 (VM_Version::supports_avx512bw() == false)); 4367 } 4368 4369 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4370 assert(UseAVX > 0, "requires some form of AVX"); 4371 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 4372 vector_len, VEX_OPCODE_0F_38); 4373 emit_int8(0x40); 4374 emit_int8((unsigned char)(0xC0 | encode)); 4375 } 4376 4377 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4378 assert(UseAVX > 2, "requires some form of AVX"); 4379 int src_enc = src->encoding(); 4380 int dst_enc = dst->encoding(); 4381 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 4382 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, 4383 VEX_OPCODE_0F_38, true, vector_len, false, false); 4384 emit_int8(0x40); 4385 emit_int8((unsigned char)(0xC0 | encode)); 4386 } 4387 4388 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4389 assert(UseAVX > 0, "requires some form of AVX"); 4390 if (VM_Version::supports_evex()) { 4391 tuple_type = EVEX_FVM; 4392 } 4393 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len); 4394 } 4395 4396 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4397 assert(UseAVX > 0, "requires some form of AVX"); 4398 if (VM_Version::supports_evex()) { 4399 tuple_type = EVEX_FV; 4400 input_size_in_bits = EVEX_32bit; 4401 } 4402 InstructionMark im(this); 4403 int dst_enc = dst->encoding(); 4404 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 4405 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, 4406 VEX_OPCODE_0F_38, false, vector_len); 4407 emit_int8(0x40); 4408 emit_operand(dst, src); 4409 } 4410 4411 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4412 assert(UseAVX > 0, "requires some form of AVX"); 4413 if (VM_Version::supports_evex()) { 4414 tuple_type = EVEX_FV; 4415 input_size_in_bits = EVEX_64bit; 4416 } 4417 InstructionMark im(this); 4418 int dst_enc = dst->encoding(); 4419 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 4420 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 4421 emit_int8(0x40); 4422 emit_operand(dst, src); 4423 } 4424 4425 // Shift packed integers left by specified number of bits. 4426 void Assembler::psllw(XMMRegister dst, int shift) { 4427 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4428 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4429 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4430 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 4431 emit_int8(0x71); 4432 emit_int8((unsigned char)(0xC0 | encode)); 4433 emit_int8(shift & 0xFF); 4434 } 4435 4436 void Assembler::pslld(XMMRegister dst, int shift) { 4437 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4438 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4439 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false); 4440 emit_int8(0x72); 4441 emit_int8((unsigned char)(0xC0 | encode)); 4442 emit_int8(shift & 0xFF); 4443 } 4444 4445 void Assembler::psllq(XMMRegister dst, int shift) { 4446 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4447 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4448 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true); 4449 emit_int8(0x73); 4450 emit_int8((unsigned char)(0xC0 | encode)); 4451 emit_int8(shift & 0xFF); 4452 } 4453 4454 void Assembler::psllw(XMMRegister dst, XMMRegister shift) { 4455 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4456 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66, false, 4457 (VM_Version::supports_avx512bw() == false)); 4458 } 4459 4460 void Assembler::pslld(XMMRegister dst, XMMRegister shift) { 4461 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4462 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66); 4463 } 4464 4465 void Assembler::psllq(XMMRegister dst, XMMRegister shift) { 4466 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4467 if (VM_Version::supports_evex()) { 4468 emit_simd_arith_q(0xF3, dst, shift, VEX_SIMD_66); 4469 } else { 4470 emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66); 
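    // 0xF3 here is the PSLLQ opcode byte (66 0F F3 /r), not an F3 (scalar/REP) prefix.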
4471 } 4472 } 4473 4474 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4475 assert(UseAVX > 0, "requires some form of AVX"); 4476 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4477 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector_len, 4478 (VM_Version::supports_avx512bw() == false)); 4479 emit_int8(shift & 0xFF); 4480 } 4481 4482 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4483 assert(UseAVX > 0, "requires some form of AVX"); 4484 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4485 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector_len); 4486 emit_int8(shift & 0xFF); 4487 } 4488 4489 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4490 assert(UseAVX > 0, "requires some form of AVX"); 4491 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4492 if (VM_Version::supports_evex()) { 4493 emit_vex_arith_q(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4494 } else { 4495 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4496 } 4497 emit_int8(shift & 0xFF); 4498 } 4499 4500 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4501 assert(UseAVX > 0, "requires some form of AVX"); 4502 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector_len, 4503 (VM_Version::supports_avx512bw() == false)); 4504 } 4505 4506 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4507 assert(UseAVX > 0, "requires some form of AVX"); 4508 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector_len); 4509 } 4510 4511 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4512 assert(UseAVX > 0, "requires some form of AVX"); 4513 if (VM_Version::supports_evex()) { 4514 emit_vex_arith_q(0xF3, dst, src, shift, VEX_SIMD_66, vector_len); 4515 } else { 4516 emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector_len); 4517 } 4518 } 4519 4520 // Shift packed integers logically right by specified number of bits. 4521 void Assembler::psrlw(XMMRegister dst, int shift) { 4522 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4523 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 4524 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4525 (VM_Version::supports_avx512bw() == false)); 4526 emit_int8(0x71); 4527 emit_int8((unsigned char)(0xC0 | encode)); 4528 emit_int8(shift & 0xFF); 4529 } 4530 4531 void Assembler::psrld(XMMRegister dst, int shift) { 4532 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4533 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 4534 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false); 4535 emit_int8(0x72); 4536 emit_int8((unsigned char)(0xC0 | encode)); 4537 emit_int8(shift & 0xFF); 4538 } 4539 4540 void Assembler::psrlq(XMMRegister dst, int shift) { 4541 // Do not confuse it with psrldq SSE2 instruction which 4542 // shifts 128 bit value in xmm register by number of bytes. 
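  // PSRLQ uses the /2 form of 66 0F 73 ib and shifts each 64-bit lane right by a bit count,
  // while PSRLDQ uses the /3 form of the same opcode and shifts the whole register by bytes.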
4543 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4544 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 4545 int encode = 0; 4546 if (VM_Version::supports_evex() && VM_Version::supports_avx512bw()) { 4547 encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false); 4548 } else { 4549 encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true); 4550 } 4551 emit_int8(0x73); 4552 emit_int8((unsigned char)(0xC0 | encode)); 4553 emit_int8(shift & 0xFF); 4554 } 4555 4556 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) { 4557 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4558 emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66, false, 4559 (VM_Version::supports_avx512bw() == false)); 4560 } 4561 4562 void Assembler::psrld(XMMRegister dst, XMMRegister shift) { 4563 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4564 emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66); 4565 } 4566 4567 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) { 4568 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4569 if (VM_Version::supports_evex()) { 4570 emit_simd_arith_q(0xD3, dst, shift, VEX_SIMD_66); 4571 } else { 4572 emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66); 4573 } 4574 } 4575 4576 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4577 assert(UseAVX > 0, "requires some form of AVX"); 4578 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 4579 emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector_len, 4580 (VM_Version::supports_avx512bw() == false)); 4581 emit_int8(shift & 0xFF); 4582 } 4583 4584 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4585 assert(UseAVX > 0, "requires some form of AVX"); 4586 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 4587 emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector_len); 4588 emit_int8(shift & 0xFF); 4589 } 4590 4591 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4592 assert(UseAVX > 0, "requires some form of AVX"); 4593 // XMM2 is for /2 encoding: 66 0F 73 /2 ib 4594 if (VM_Version::supports_evex()) { 4595 emit_vex_arith_q(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len); 4596 } else { 4597 emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len); 4598 } 4599 emit_int8(shift & 0xFF); 4600 } 4601 4602 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4603 assert(UseAVX > 0, "requires some form of AVX"); 4604 emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector_len, 4605 (VM_Version::supports_avx512bw() == false)); 4606 } 4607 4608 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4609 assert(UseAVX > 0, "requires some form of AVX"); 4610 emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector_len); 4611 } 4612 4613 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4614 assert(UseAVX > 0, "requires some form of AVX"); 4615 if (VM_Version::supports_evex()) { 4616 emit_vex_arith_q(0xD3, dst, src, shift, VEX_SIMD_66, vector_len); 4617 } else { 4618 emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector_len); 4619 } 4620 } 4621 4622 // Shift packed integers arithmetically right by specified number of bits. 
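// Only word and doubleword forms follow: a packed 64-bit arithmetic right shift (vpsraq)
// exists only as an AVX-512 instruction, so no quadword variant is emitted in this section.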
4623 void Assembler::psraw(XMMRegister dst, int shift) { 4624 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4625 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4626 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4627 (VM_Version::supports_avx512bw() == false)); 4628 emit_int8(0x71); 4629 emit_int8((unsigned char)(0xC0 | encode)); 4630 emit_int8(shift & 0xFF); 4631 } 4632 4633 void Assembler::psrad(XMMRegister dst, int shift) { 4634 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4635 // XMM4 is for /4 encoding: 66 0F 72 /4 ib 4636 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false); 4637 emit_int8(0x72); 4638 emit_int8((unsigned char)(0xC0 | encode)); 4639 emit_int8(shift & 0xFF); 4640 } 4641 4642 void Assembler::psraw(XMMRegister dst, XMMRegister shift) { 4643 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4644 emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66, 4645 (VM_Version::supports_avx512bw() == false)); 4646 } 4647 4648 void Assembler::psrad(XMMRegister dst, XMMRegister shift) { 4649 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4650 emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66); 4651 } 4652 4653 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4654 assert(UseAVX > 0, "requires some form of AVX"); 4655 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4656 emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector_len, 4657 (VM_Version::supports_avx512bw() == false)); 4658 emit_int8(shift & 0xFF); 4659 } 4660 4661 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4662 assert(UseAVX > 0, "requires some form of AVX"); 4663 // XMM4 is for /4 encoding: 66 0F 71 /4 ib 4664 emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector_len); 4665 emit_int8(shift & 0xFF); 4666 } 4667 4668 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4669 assert(UseAVX > 0, "requires some form of AVX"); 4670 emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector_len, 4671 (VM_Version::supports_avx512bw() == false)); 4672 } 4673 4674 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4675 assert(UseAVX > 0, "requires some form of AVX"); 4676 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len); 4677 } 4678 4679 4680 // AND packed integers 4681 void Assembler::pand(XMMRegister dst, XMMRegister src) { 4682 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4683 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66); 4684 } 4685 4686 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4687 assert(UseAVX > 0, "requires some form of AVX"); 4688 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); 4689 } 4690 4691 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4692 assert(UseAVX > 0, "requires some form of AVX"); 4693 if (VM_Version::supports_evex()) { 4694 tuple_type = EVEX_FV; 4695 input_size_in_bits = EVEX_32bit; 4696 } 4697 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); 4698 } 4699 4700 void Assembler::por(XMMRegister dst, XMMRegister src) { 4701 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4702 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66); 4703 } 4704 4705 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4706 assert(UseAVX > 0, "requires some form of AVX"); 4707 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4708 } 4709 4710 
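// Usage sketch (hypothetical caller code, not taken from this file; assumes AVX2 is available):
//   vpand(xmm0, xmm1, xmm2, AVX_256bit);   // xmm0 = xmm1 & xmm2
//   vpor (xmm0, xmm0, xmm2, AVX_256bit);   // xmm0 |= xmm2
//   vpxor(xmm3, xmm3, xmm3, AVX_256bit);   // common idiom: zero a vector register
// The Address forms below additionally record EVEX_FV/EVEX_32bit so that disp8*N
// compression can be applied when an EVEX encoding is selected.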
void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4711 assert(UseAVX > 0, "requires some form of AVX"); 4712 if (VM_Version::supports_evex()) { 4713 tuple_type = EVEX_FV; 4714 input_size_in_bits = EVEX_32bit; 4715 } 4716 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4717 } 4718 4719 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 4720 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4721 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66); 4722 } 4723 4724 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4725 assert(UseAVX > 0, "requires some form of AVX"); 4726 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4727 } 4728 4729 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4730 assert(UseAVX > 0, "requires some form of AVX"); 4731 if (VM_Version::supports_evex()) { 4732 tuple_type = EVEX_FV; 4733 input_size_in_bits = EVEX_32bit; 4734 } 4735 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4736 } 4737 4738 4739 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4740 assert(VM_Version::supports_avx(), ""); 4741 int vector_len = AVX_256bit; 4742 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4743 emit_int8(0x18); 4744 emit_int8((unsigned char)(0xC0 | encode)); 4745 // 0x00 - insert into lower 128 bits 4746 // 0x01 - insert into upper 128 bits 4747 emit_int8(0x01); 4748 } 4749 4750 void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4751 assert(VM_Version::supports_evex(), ""); 4752 int vector_len = AVX_512bit; 4753 int src_enc = src->encoding(); 4754 int dst_enc = dst->encoding(); 4755 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 4756 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, 4757 VEX_OPCODE_0F_3A, true, vector_len, false, false); 4758 emit_int8(0x1A); 4759 emit_int8((unsigned char)(0xC0 | encode)); 4760 // 0x00 - insert into lower 256 bits 4761 // 0x01 - insert into upper 256 bits 4762 emit_int8(0x01); 4763 } 4764 4765 void Assembler::vinsertf64x4h(XMMRegister dst, Address src) { 4766 assert(VM_Version::supports_avx(), ""); 4767 if (VM_Version::supports_evex()) { 4768 tuple_type = EVEX_T4; 4769 input_size_in_bits = EVEX_64bit; 4770 } 4771 InstructionMark im(this); 4772 int vector_len = AVX_512bit; 4773 assert(dst != xnoreg, "sanity"); 4774 int dst_enc = dst->encoding(); 4775 // swap src<->dst for encoding 4776 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector_len); 4777 emit_int8(0x1A); 4778 emit_operand(dst, src); 4779 // 0x01 - insert into upper 128 bits 4780 emit_int8(0x01); 4781 } 4782 4783 void Assembler::vinsertf128h(XMMRegister dst, Address src) { 4784 assert(VM_Version::supports_avx(), ""); 4785 if (VM_Version::supports_evex()) { 4786 tuple_type = EVEX_T4; 4787 input_size_in_bits = EVEX_32bit; 4788 } 4789 InstructionMark im(this); 4790 int vector_len = AVX_256bit; 4791 assert(dst != xnoreg, "sanity"); 4792 int dst_enc = dst->encoding(); 4793 // swap src<->dst for encoding 4794 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4795 emit_int8(0x18); 4796 emit_operand(dst, src); 4797 // 0x01 - insert into upper 128 bits 4798 emit_int8(0x01); 4799 } 4800 4801 void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) { 4802 assert(VM_Version::supports_avx(), ""); 4803 int vector_len = AVX_256bit; 4804 int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4805 emit_int8(0x19); 4806 emit_int8((unsigned char)(0xC0 | encode)); 4807 // 0x00 - insert into lower 128 bits 4808 // 0x01 - insert into upper 128 bits 4809 emit_int8(0x01); 4810 } 4811 4812 void Assembler::vextractf128h(Address dst, XMMRegister src) { 4813 assert(VM_Version::supports_avx(), ""); 4814 if (VM_Version::supports_evex()) { 4815 tuple_type = EVEX_T4; 4816 input_size_in_bits = EVEX_32bit; 4817 } 4818 InstructionMark im(this); 4819 int vector_len = AVX_256bit; 4820 assert(src != xnoreg, "sanity"); 4821 int src_enc = src->encoding(); 4822 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4823 emit_int8(0x19); 4824 emit_operand(src, dst); 4825 // 0x01 - extract from upper 128 bits 4826 emit_int8(0x01); 4827 } 4828 4829 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4830 assert(VM_Version::supports_avx2(), ""); 4831 int vector_len = AVX_256bit; 4832 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4833 emit_int8(0x38); 4834 emit_int8((unsigned char)(0xC0 | encode)); 4835 // 0x00 - insert into lower 128 bits 4836 // 0x01 - insert into upper 128 bits 4837 emit_int8(0x01); 4838 } 4839 4840 void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4841 assert(VM_Version::supports_evex(), ""); 4842 int vector_len = AVX_512bit; 4843 int src_enc = src->encoding(); 4844 int dst_enc = dst->encoding(); 4845 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 4846 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4847 VM_Version::supports_avx512dq(), vector_len, false, false); 4848 emit_int8(0x38); 4849 emit_int8((unsigned char)(0xC0 | encode)); 4850 // 0x00 - insert into lower 256 bits 4851 // 0x01 - insert into upper 256 bits 4852 emit_int8(0x01); 4853 } 4854 4855 void Assembler::vinserti128h(XMMRegister dst, Address src) { 4856 assert(VM_Version::supports_avx2(), ""); 4857 if (VM_Version::supports_evex()) { 4858 tuple_type = EVEX_T4; 4859 input_size_in_bits = EVEX_32bit; 4860 } 4861 InstructionMark im(this); 4862 int vector_len = AVX_256bit; 4863 assert(dst != xnoreg, "sanity"); 4864 int dst_enc = dst->encoding(); 4865 // swap src<->dst for encoding 4866 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4867 emit_int8(0x38); 4868 emit_operand(dst, src); 4869 // 0x01 - insert into upper 128 bits 4870 emit_int8(0x01); 4871 } 4872 4873 void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) { 4874 assert(VM_Version::supports_avx(), ""); 4875 int vector_len = AVX_256bit; 4876 int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4877 emit_int8(0x39); 4878 emit_int8((unsigned char)(0xC0 | encode)); 4879 // 0x00 - insert into lower 128 bits 4880 // 0x01 - insert into upper 128 bits 4881 emit_int8(0x01); 4882 } 4883 4884 void Assembler::vextracti128h(Address dst, XMMRegister src) { 4885 assert(VM_Version::supports_avx2(), ""); 4886 if (VM_Version::supports_evex()) { 4887 tuple_type = EVEX_T4; 4888 input_size_in_bits = EVEX_32bit; 4889 } 4890 InstructionMark im(this); 4891 int vector_len = AVX_256bit; 4892 assert(src != xnoreg, "sanity"); 4893 int src_enc = src->encoding(); 4894 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len); 4895 emit_int8(0x39); 4896 emit_operand(src, dst); 4897 // 0x01 - extract from upper 128 bits 4898 emit_int8(0x01); 4899 } 4900 4901 void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src) { 4902 assert(VM_Version::supports_evex(), ""); 4903 int vector_len = AVX_512bit; 4904 int src_enc = src->encoding(); 4905 int dst_enc = dst->encoding(); 4906 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4907 true, vector_len, false, false); 4908 emit_int8(0x3B); 4909 emit_int8((unsigned char)(0xC0 | encode)); 4910 // 0x01 - extract from upper 256 bits 4911 emit_int8(0x01); 4912 } 4913 4914 void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) { 4915 assert(VM_Version::supports_evex(), ""); 4916 int vector_len = AVX_512bit; 4917 int src_enc = src->encoding(); 4918 int dst_enc = dst->encoding(); 4919 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4920 VM_Version::supports_avx512dq(), vector_len, false, false); 4921 emit_int8(0x39); 4922 emit_int8((unsigned char)(0xC0 | encode)); 4923 // 0x01 - extract from bits 255:128 4924 // 0x02 - extract from bits 383:256 4925 // 0x03 - extract from bits 511:384 4926 emit_int8(value & 0x3); 4927 } 4928 4929 void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src) { 4930 assert(VM_Version::supports_evex(), ""); 4931 int vector_len = AVX_512bit; 4932 int src_enc = src->encoding(); 4933 int dst_enc = dst->encoding(); 4934 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4935 VM_Version::supports_avx512dq(), vector_len, false, false); 4936 emit_int8(0x1B); 4937 
emit_int8((unsigned char)(0xC0 | encode)); 4938 // 0x01 - extract from upper 256 bits 4939 emit_int8(0x01); 4940 } 4941 4942 void Assembler::vextractf64x4h(Address dst, XMMRegister src) { 4943 assert(VM_Version::supports_avx2(), ""); 4944 tuple_type = EVEX_T4; 4945 input_size_in_bits = EVEX_64bit; 4946 InstructionMark im(this); 4947 int vector_len = AVX_512bit; 4948 assert(src != xnoreg, "sanity"); 4949 int src_enc = src->encoding(); 4950 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4951 VM_Version::supports_avx512dq(), vector_len); 4952 emit_int8(0x1B); 4953 emit_operand(src, dst); 4954 // 0x01 - extract from upper 128 bits 4955 emit_int8(0x01); 4956 } 4957 4958 void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) { 4959 assert(VM_Version::supports_evex(), ""); 4960 int vector_len = AVX_512bit; 4961 int src_enc = src->encoding(); 4962 int dst_enc = dst->encoding(); 4963 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, 4964 VEX_OPCODE_0F_3A, false, vector_len, false, false); 4965 emit_int8(0x19); 4966 emit_int8((unsigned char)(0xC0 | encode)); 4967 // 0x01 - extract from bits 255:128 4968 // 0x02 - extract from bits 383:256 4969 // 0x03 - extract from bits 511:384 4970 emit_int8(value & 0x3); 4971 } 4972 4973 void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) { 4974 assert(VM_Version::supports_evex(), ""); 4975 int vector_len = AVX_512bit; 4976 int src_enc = src->encoding(); 4977 int dst_enc = dst->encoding(); 4978 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, 4979 VM_Version::supports_avx512dq(), vector_len, false, false); 4980 emit_int8(0x19); 4981 emit_int8((unsigned char)(0xC0 | encode)); 4982 // 0x01 - extract from bits 255:128 4983 // 0x02 - extract from bits 383:256 4984 // 0x03 - extract from bits 511:384 4985 emit_int8(value & 0x3); 4986 } 4987 4988 // duplicate 4-bytes integer data from src into 8 locations in dest 4989 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) { 4990 assert(VM_Version::supports_avx2(), ""); 4991 int vector_len = AVX_256bit; 4992 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 4993 vector_len, VEX_OPCODE_0F_38, false); 4994 emit_int8(0x58); 4995 emit_int8((unsigned char)(0xC0 | encode)); 4996 } 4997 4998 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL 4999 void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) { 5000 assert(VM_Version::supports_evex(), ""); 5001 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 5002 vector_len, VEX_OPCODE_0F_38, false); 5003 emit_int8(0x78); 5004 emit_int8((unsigned char)(0xC0 | encode)); 5005 } 5006 5007 void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) { 5008 assert(VM_Version::supports_evex(), ""); 5009 tuple_type = EVEX_T1S; 5010 input_size_in_bits = EVEX_8bit; 5011 InstructionMark im(this); 5012 assert(dst != xnoreg, "sanity"); 5013 int dst_enc = dst->encoding(); 5014 // swap src<->dst for encoding 5015 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5016 emit_int8(0x78); 5017 emit_operand(dst, src); 5018 } 5019 5020 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL 5021 void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { 5022 assert(VM_Version::supports_evex(), ""); 5023 int encode = vex_prefix_and_encode(dst, xnoreg, 
src, VEX_SIMD_66, 5024 vector_len, VEX_OPCODE_0F_38, false); 5025 emit_int8(0x79); 5026 emit_int8((unsigned char)(0xC0 | encode)); 5027 } 5028 5029 void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) { 5030 assert(VM_Version::supports_evex(), ""); 5031 tuple_type = EVEX_T1S; 5032 input_size_in_bits = EVEX_16bit; 5033 InstructionMark im(this); 5034 assert(dst != xnoreg, "sanity"); 5035 int dst_enc = dst->encoding(); 5036 // swap src<->dst for encoding 5037 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5038 emit_int8(0x79); 5039 emit_operand(dst, src); 5040 } 5041 5042 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5043 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { 5044 assert(VM_Version::supports_evex(), ""); 5045 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, 5046 vector_len, VEX_OPCODE_0F_38, false); 5047 emit_int8(0x58); 5048 emit_int8((unsigned char)(0xC0 | encode)); 5049 } 5050 5051 void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) { 5052 assert(VM_Version::supports_evex(), ""); 5053 tuple_type = EVEX_T1S; 5054 input_size_in_bits = EVEX_32bit; 5055 InstructionMark im(this); 5056 assert(dst != xnoreg, "sanity"); 5057 int dst_enc = dst->encoding(); 5058 // swap src<->dst for encoding 5059 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5060 emit_int8(0x58); 5061 emit_operand(dst, src); 5062 } 5063 5064 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5065 void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { 5066 assert(VM_Version::supports_evex(), ""); 5067 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5068 VEX_OPCODE_0F_38, true, vector_len, false, false); 5069 emit_int8(0x59); 5070 emit_int8((unsigned char)(0xC0 | encode)); 5071 } 5072 5073 void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) { 5074 assert(VM_Version::supports_evex(), ""); 5075 tuple_type = EVEX_T1S; 5076 input_size_in_bits = EVEX_64bit; 5077 InstructionMark im(this); 5078 assert(dst != xnoreg, "sanity"); 5079 int dst_enc = dst->encoding(); 5080 // swap src<->dst for encoding 5081 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 5082 emit_int8(0x59); 5083 emit_operand(dst, src); 5084 } 5085 5086 // duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL 5087 void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { 5088 assert(VM_Version::supports_evex(), ""); 5089 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5090 VEX_OPCODE_0F_38, false, vector_len, false, false); 5091 emit_int8(0x18); 5092 emit_int8((unsigned char)(0xC0 | encode)); 5093 } 5094 5095 void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) { 5096 assert(VM_Version::supports_evex(), ""); 5097 tuple_type = EVEX_T1S; 5098 input_size_in_bits = EVEX_32bit; 5099 InstructionMark im(this); 5100 assert(dst != xnoreg, "sanity"); 5101 int dst_enc = dst->encoding(); 5102 // swap src<->dst for encoding 5103 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len); 5104 emit_int8(0x18); 5105 emit_operand(dst, src); 5106 } 5107 5108 // duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL 5109 void 
Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { 5110 assert(VM_Version::supports_evex(), ""); 5111 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5112 VEX_OPCODE_0F_38, true, vector_len, false, false); 5113 emit_int8(0x19); 5114 emit_int8((unsigned char)(0xC0 | encode)); 5115 } 5116 5117 void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) { 5118 assert(VM_Version::supports_evex(), ""); 5119 tuple_type = EVEX_T1S; 5120 input_size_in_bits = EVEX_64bit; 5121 InstructionMark im(this); 5122 assert(dst != xnoreg, "sanity"); 5123 int dst_enc = dst->encoding(); 5124 // swap src<->dst for encoding 5125 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 5126 emit_int8(0x19); 5127 emit_operand(dst, src); 5128 } 5129 5130 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL 5131 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { 5132 assert(VM_Version::supports_evex(), ""); 5133 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5134 VEX_OPCODE_0F_38, false, vector_len, false, false); 5135 emit_int8(0x7A); 5136 emit_int8((unsigned char)(0xC0 | encode)); 5137 } 5138 5139 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL 5140 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { 5141 assert(VM_Version::supports_evex(), ""); 5142 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5143 VEX_OPCODE_0F_38, false, vector_len, false, false); 5144 emit_int8(0x7B); 5145 emit_int8((unsigned char)(0xC0 | encode)); 5146 } 5147 5148 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5149 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { 5150 assert(VM_Version::supports_evex(), ""); 5151 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5152 VEX_OPCODE_0F_38, false, vector_len, false, false); 5153 emit_int8(0x7C); 5154 emit_int8((unsigned char)(0xC0 | encode)); 5155 } 5156 5157 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5158 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { 5159 assert(VM_Version::supports_evex(), ""); 5160 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5161 VEX_OPCODE_0F_38, true, vector_len, false, false); 5162 emit_int8(0x7C); 5163 emit_int8((unsigned char)(0xC0 | encode)); 5164 } 5165 5166 // Carry-Less Multiplication Quadword 5167 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { 5168 assert(VM_Version::supports_clmul(), ""); 5169 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 5170 VEX_OPCODE_0F_3A, false, AVX_128bit, true); 5171 emit_int8(0x44); 5172 emit_int8((unsigned char)(0xC0 | encode)); 5173 emit_int8((unsigned char)mask); 5174 } 5175 5176 // Carry-Less Multiplication Quadword 5177 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 5178 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 5179 int vector_len = AVX_128bit; 5180 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 5181 vector_len, VEX_OPCODE_0F_3A, true); 5182 emit_int8(0x44); 5183 emit_int8((unsigned char)(0xC0 | encode)); 5184 emit_int8((unsigned 

void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  if (UseAVX < 3)
  {
    (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
    emit_int8(0x77);
  }
}


#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax
// and, if they are equal, stores rcx:rbx into adr; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

#endif // _LP64

// 64bit typically doesn't use the x87 but needs to for the trig funcs

void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}
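
// For reference on the two composite helpers that follow: fyl2x computes
// ST(1) * log2(ST(0)) and pops the stack, so pushing ln(2) (fldln2) or
// log10(2) (fldlg2) first and swapping with fxch leaves ln(x) respectively
// log10(x) on top, via the identity log_b(x) = log_b(2) * log2(x).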

void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                         prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, int vector_len) {
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v,
                            int nds_enc, VexSimdPrefix pre, VexOpcode opc,
                            bool is_extended_context, bool is_merge_context,
                            int vector_len, bool no_mask_reg ){
  // EVEX 0x62 prefix
  prefix(EVEX_4bytes);
  evex_encoding = (vex_w ? VEX_W : 0) | (evex_r ? EVEX_Rb : 0);

  // P0: byte 2, initialized to RXBR`00mm
  // instead of not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;
  emit_int8(byte2);

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;
  emit_int8(byte3);

  // P2: byte 4 as zL'Lbv'aaa
  int byte4 = (no_mask_reg) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0: EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (is_extended_context ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((vector_len) & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  byte4 |= (is_merge_context ? EVEX_Z : 0);
  emit_int8(byte4);
}

void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre,
                           VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
  bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0;
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  avx_vector_len = vector_len;

  // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
  if (VM_Version::supports_avx512vl() == false) {
    switch (vector_len) {
    case AVX_128bit:
    case AVX_256bit:
      legacy_mode = true;
      break;
    }
  }

  if ((UseAVX > 2) && (legacy_mode == false))
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    is_evex_instruction = true;
    evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
  } else {
    vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len);
  }
}
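
// Worked example for the prefix helpers above (illustrative only): with no
// B/X/W extension bits and a plain 0F-map opcode the two-byte VEX form is
// chosen, so e.g. vex_r = 0, nds_enc = 1, vector_len = 1 (256-bit) and
// pre = VEX_SIMD_66 emit 0xC5 followed by
//   byte1 = 0x80 | ((~1 & 0xF) << 3) | 4 | pre
// which works out to 0xF5 assuming VEX_SIMD_66 occupies the pp field as value 1.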

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
                                     bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg ) {
  bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0;
  bool vex_b = ((src_enc & 8) == 8) ? 1 : 0;
  bool vex_x = false;
  avx_vector_len = vector_len;

  // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
  if (VM_Version::supports_avx512vl() == false) {
    switch (vector_len) {
    case AVX_128bit:
    case AVX_256bit:
      legacy_mode = true;
      break;
    }
  }

  if ((UseAVX > 2) && (legacy_mode == false))
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
  } else {
    vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}


void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}

int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, VexSimdPrefix pre,
                                      bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg);
}
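
// Note on the value returned by the *_prefix_and_encode helpers above: it is
// only the register portion of a ModRM byte, (dst_enc & 7) << 3 | (src_enc & 7),
// which callers turn into a register-direct ModRM with
// emit_int8((unsigned char)(0xC0 | encode)).  For instance dst_enc = 9 and
// src_enc = 2 give encode = 0x0A and a ModRM byte of 0xCA; the high register
// bits travel in the REX/VEX/EVEX prefix instead.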

int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, VexSimdPrefix pre,
                                      bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg);
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg) {
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, pre, no_mask_reg);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Versions with no second source register (non-destructive source).
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre, opNoRegMask);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) {
  InstructionMark im(this);
  simd_prefix_q(dst, xnoreg, src, pre, opNoRegMask);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, legacy_mode, AVX_128bit);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// 3-operands AVX instructions
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, Address src,
                               VexSimdPrefix pre, int vector_len, bool no_mask_reg, bool legacy_mode) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector_len, no_mask_reg, legacy_mode);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds,
                                 Address src, VexSimdPrefix pre, int vector_len, bool no_mask_reg) {
  InstructionMark im(this);
  vex_prefix_q(dst, nds, src, pre, vector_len, no_mask_reg);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src,
                               VexSimdPrefix pre, int vector_len, bool no_mask_reg, bool legacy_mode) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector_len, VEX_OPCODE_0F, false, no_mask_reg);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src,
                                 VexSimdPrefix pre, int vector_len, bool no_mask_reg) {
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative addressing;
// it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

// Check if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
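
// Rough illustration of the tests above: a rip-relative operand carries only a
// signed 32-bit displacement, so a target is treated as reachable only when it
// lies within +/-2GB of both ends of the code cache (with a small fudge for
// the size of the final instruction).  A target 1GB past CodeCache::high_bound()
// still encodes; one 3GB away fails the is_simm32 checks and the caller has to
// materialize a 64-bit literal instead.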

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}


void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
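
// For illustration, the REX selection above composes the prefix from W (64-bit
// operand size), R (extends ModRM.reg), X (extends SIB.index) and B (extends
// ModRM.rm or the base register).  E.g. a 64-bit reg-reg op with dst = r10 and
// src = rax goes through prefixq_and_encode(10, 0): dst_enc >= 8 with
// src_enc < 8 selects REX_WR (0x4C) and the helper returns (2 << 3) | 0 for
// the ModRM byte.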

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::adcxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::adoxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q_legacy(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q_legacy(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q_legacy(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q_legacy(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
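
// The blsi/blsmsk/blsr helpers above all emit the same VEX-encoded opcode
// (0xF3) and differ only in the register handed to the prefix helper: rbx, rdx
// and rcx are not real operands here, their encodings (3, 2, 1) select the
// /3, /2 and /1 opcode-extension forms while the actual destination register
// is carried in VEX.vvvv.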

void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x3B);
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2, true);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3, true);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8(0xC8 | encode);
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66, true);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66, true);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use it until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::mulq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(),
                                     VEX_SIMD_F2, VEX_OPCODE_0F_38, true, AVX_128bit, true, false);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9, Address(rsp, 6 * wordSize));
  movq(r8, Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
void Assembler::pusha() { // 64bit
  // we have to store original rsp. ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xC8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2,
                                     VEX_OPCODE_0F_3A, true, AVX_128bit, true, false);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
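// Note: for the 0x81 immediate-group instructions below, the Register passed to
// emit_arith_operand()/emit_operand() is not a real operand; its encoding supplies
// the /digit opcode extension in the ModRM reg field (rbx -> /3 = SBB, rbp -> /5 = SUB).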
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // rax gets the short form: REX.W + 0xA9 (TEST RAX, imm32)
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
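// The read-modify-write instructions below are not atomic by themselves: XADD needs
// an explicit LOCK prefix (callers typically emit lock() first), while XCHG with a
// memory operand is implicitly locked by the processor.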
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64