/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement (disp8) on EVEX enabled platforms.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};
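// Illustrative sketch (not part of the original sources): how the table above
// drives EVEX disp8*N compression. For a full-vector tuple (EVEX_FV(0)) with a
// 512-bit vector length the scale factor N is 64, so a byte displacement of
// 128 compresses to a disp8 of 2:
//
//   int disp = 128;
//   int N    = tuple_table[EVEX_FV + 0][AVX_512bit];  // 64
//   if ((disp % N) == 0 && is8bit(disp / N)) {
//     disp = disp / N;                                // emit 0x02 as disp8
//   }
//
// A displacement that is not a multiple of N (e.g. 130) cannot be compressed
// and must be emitted as a full disp32.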
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64
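// Illustrative sketch (not part of the original sources): an AddressLiteral,
// as constructed above, carries both the raw target and a relocation spec so
// the embedded address can be found and re-patched after the code moves.
// "some_global" is a hypothetical symbol used only for illustration:
//
//   AddressLiteral lit((address)&some_global, relocInfo::external_word_type);
//   // lit._target == &some_global
//   // lit._rspec  == external_word_Relocation::spec(&some_global)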
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}

void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}
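// Illustrative sketch (not part of the original sources): what the
// emit_arith() immediate forms produce. For addl(rbx, 5) the caller passes
// op1 = 0x81 and op2 = 0xC0; since 5 fits in a signed byte, the sign-extend
// bit is set and the short form is emitted:
//
//   emit_arith(0x81, 0xC0, rbx, 5);   // emits 83 C3 05        (add ebx, 5)
//   emit_arith(0x81, 0xC0, rbx, 300); // emits 81 C3 2C 01 00 00
//
// emit_arith_imm32() always chooses the second, 4-byte-immediate form, which
// keeps the instruction at a fixed, patchable length.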
// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}

void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}

bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = 2 + (((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0);
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}
bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && is_evex_instruction) {
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = 2 + (((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0);
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (input_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (input_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (avx_vector_len >= AVX_128bit && avx_vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][avx_vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was computed as the target address minus the pc at the start
      // of the instruction; it still needs the rip-relative correction
      // applied here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
  is_evex_instruction = false;
}

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  if (UseAVX > 2) {
    int xreg_enc = reg->encoding();
    if (xreg_enc > 15) {
      XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
      emit_operand((Register)new_reg, base, index, scale, disp, rspec);
      return;
    }
  }
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
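// Illustrative sketch (not part of the original sources): the byte sequence
// emit_operand() produces for a typical [base + index*scale + disp8] operand.
// With reg = rax, base = rbp, index = rsi, scale = times_4 and disp = 16,
// the [01 reg 100][ss index base] disp8 path applies:
//
//   ModRM = 0x44 | (encode(rax) << 3)                      -> 0x44
//   SIB   = (2 << 6) | (encode(rsi) << 3) | encode(rbp)    -> 0xB5
//   disp8 = 0x10
//
// so "addl rax, [rbp + rsi*4 + 16]" encodes as 03 44 B5 10.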
// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1; // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand) return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and are processed when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert((UseAVX > 0), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp rdisp32
    if (which == end_pc_operand) return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07; // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg 100][ss index base]
    // [00 reg 100][00 100 esp]
    // [00 reg base]
    // [00 reg 100][ss index 101][disp32]
    // [00 reg 101]              [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip; // caller wants the disp32
      ip += 4;     // skip the disp32
    }
    break;

  case 1:
    // [01 reg 100][ss index base][disp8]
    // [01 reg 100][00 100 esp][disp8]
    // [01 reg base]            [disp8]
    ip += 1; // skip the disp8
    break;

  case 2:
    // [10 reg 100][ss index base][disp32]
    // [10 reg 100][00 100 esp][disp32]
    // [10 reg base]            [disp32]
    if (which == disp32_operand)
      return ip; // caller wants the disp32
    ip += 4;     // skip the disp32
    break;

  case 3:
    // [11 reg base] (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}

#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}

// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_q(0x58, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, false,
              VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, false,
              VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, false,
              VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, false,
              VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_legacy(dst, src1, src2, false);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_legacy(dst, src1, src2, false);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_legacy(rbx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_legacy(rbx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}
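// Illustrative note (not part of the original sources): the BLS* emitters
// here and below all share opcode 0xF3 and are distinguished only by the
// ModRM reg field, which is why a fixed register stands in for that field:
//
//   blsil(rax, rbx);   // reg field = rbx (/3): rax = rbx & -rbx        (isolate lowest set bit)
//   blsmskl(rax, rbx); // reg field = rdx (/2): rax = (rbx - 1) ^ rbx   (mask up to lowest set bit)
//   blsrl(rax, rbx);   // reg field = rcx (/1): rax = rbx & (rbx - 1)   (clear lowest set bit)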
void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_legacy(rdx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rdx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_legacy(rcx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_legacy(rcx, dst, src, false);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}
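// Illustrative sketch (not part of the original sources): how the relative
// displacement for a direct call is computed. The disp32 is relative to the
// end of the 5-byte E8 instruction, so for a bound label 11 bytes behind the
// call site:
//
//   offs = (int)( target(L) - pc() );            // -11, pc() before the E8
//   emit_int8((unsigned char)0xE8);
//   emit_data(offs - long_size, rtype, operand); // disp32 = -16
//
// i.e. the encoded field is always "target - (address after the call)".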
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax;
// if they are equal, reg is stored at adr, otherwise the value at adr is
// loaded into rax. The ZF is set if the compared values were equal, and
// cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

// The 8-bit cmpxchg compares the value at adr with the contents of rax;
// if they are equal, reg is stored at adr, otherwise the value at adr is
// loaded into rax. The ZF is set if the compared values were equal, and
// cleared otherwise.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg, true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB0);
  emit_operand(reg, adr);
}
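// Illustrative sketch (not part of the original sources): cmpxchg is normally
// paired with a lock prefix to make the compare-and-swap atomic on
// multiprocessors. A typical caller-side sequence (obj and newval are
// hypothetical registers) would look like:
//
//   // rax holds the expected old value, newval the replacement
//   if (os::is_MP()) {
//     lock();
//   }
//   cmpxchgl(newval, Address(obj, offset));  // ZF set on success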
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely, ucomisd comes out correct.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true);
  } else {
    emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
  }
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_nonds_q(0x2F, dst, src, VEX_SIMD_66, true);
  } else {
    emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
  }
}

void Assembler::comiss(XMMRegister dst, Address src) {
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE, true);
}

void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1F;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_q(0x5A, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = 0;
  if (VM_Version::supports_evex()) {
    encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true);
  } else {
    encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, false);
  }
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
    emit_simd_arith_q(0x2A, dst, src, VEX_SIMD_F2, true);
  } else {
    emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, true);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}
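// Illustrative note (not part of the original sources): the memory-operand
// forms above set tuple_type and input_size_in_bits before emitting because
// emit_compressed_disp_byte() consults exactly those fields (via tuple_table)
// to decide whether the displacement can be squeezed into a disp8*N byte.
// For example, an EVEX-encoded addsd uses EVEX_T1S/EVEX_64bit, giving a
// compression factor of 8.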
void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3, true);
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::divss(XMMRegister dst, Address src) {
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    //       is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    //       an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}

void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int8(0x70 | cc);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8(0x70 | cc);
    emit_int8(0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}
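// Illustrative sketch (not part of the original sources): choosing between
// the short and long branch forms. For a backward jump to a label bound
// 10 bytes before the current pc:
//
//   offs = -10;
//   is8bit(offs - short_size)  // true, so jmp(L, true) emits EB F4
//   // otherwise the long form E9 F1 FF FF FF (offs - long_size) is used
//
// Forward jumps to unbound labels always reserve the 32-bit form (or a
// single byte for jmpb/jccb) and are patched when the label is bound.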
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(2), src);
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_nonds_q(0x28, dst, src, VEX_SIMD_66, true);
  } else {
    emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
  }
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, true, VEX_OPCODE_0F,
                                      false, AVX_128bit);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}

void Assembler::kmovq(KRegister dst, KRegister src) {
  NOT_LP64(assert(VM_Version::supports_evex(), ""));
  int encode = kreg_prefix_and_encode(dst, knoreg, src, VEX_SIMD_NONE,
                                      true, VEX_OPCODE_0F, true);
  emit_int8((unsigned char)0x90);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::kmovq(KRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_evex(), ""));
  int dst_enc = dst->encoding();
  int nds_enc = 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_NONE,
             VEX_OPCODE_0F, true, AVX_128bit, true, true);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)dst, src);
}

void Assembler::kmovq(Address dst, KRegister src) {
  NOT_LP64(assert(VM_Version::supports_evex(), ""));
  int src_enc = src->encoding();
  int nds_enc = 0;
  vex_prefix(dst, nds_enc, src_enc, VEX_SIMD_NONE,
             VEX_OPCODE_0F, true, AVX_128bit, true, true);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)src, dst);
}

void Assembler::kmovql(KRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_evex(), ""));
  bool supports_bw = VM_Version::supports_avx512bw();
  VexSimdPrefix pre = supports_bw ? VEX_SIMD_F2 : VEX_SIMD_NONE;
  int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true,
                                      VEX_OPCODE_0F, supports_bw);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::kmovdl(KRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_evex(), ""));
  VexSimdPrefix pre = VM_Version::supports_avx512bw() ? VEX_SIMD_F2 : VEX_SIMD_NONE;
  int encode = kreg_prefix_and_encode(dst, knoreg, src, pre, true, VEX_OPCODE_0F, false);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}
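// Note (descriptive only): the kmov* forms above move values between the
// AVX-512 opmask registers k0-k7 and general registers or memory. The SIMD
// prefix is chosen based on AVX512BW support because the wider mask moves
// are only defined on BW-capable hardware.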
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}


void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66, true);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66, true);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, true, VEX_OPCODE_0F);
  emit_int8(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, true);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3, false);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector_len);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned EVEX enabled Vector (programmable element sizes: 8, 16, 32, 64)
void Assembler::evmovdqu(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "");
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(dst_enc, 0, src_enc, VEX_SIMD_F3, VEX_OPCODE_0F,
                                     true, vector_len, false, false);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evmovdqu(XMMRegister dst, Address src, int vector_len) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
    vex_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false);
  } else {
    vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector_len, false);
  }
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqu(Address dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  assert(src != xnoreg, "sanity");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
    // swap src<->dst for encoding
    vex_prefix_q(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false);
  } else {
    // swap src<->dst for encoding
    vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector_len, false);
  }
  emit_int8(0x7F);
  emit_operand(src, dst);
}
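// Summary of the unaligned integer moves above (descriptive only): movdqu
// operates on 128-bit XMM registers, vmovdqu is the 256-bit AVX form, and
// evmovdqu takes an explicit vector_len so EVEX targets can select 128-,
// 256- or 512-bit operation. Unlike movdqa, none of these forms require the
// memory operand to be 16-byte aligned.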
// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid a partial register
// stall when loading from memory. But for old Opterons, movlpd is used
// instead of movsd. The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66, true);
}

void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc, with only an emit_operand(MMX, Address) available,
  // gcc will tail jump and try to reverse the parameters, completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump, the bug is
  // avoided.
  emit_operand(dst, src);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    simd_prefix_q(dst, xnoreg, src, VEX_SIMD_F3, true);
  } else {
    simd_prefix(dst, src, VEX_SIMD_F3, true, VEX_OPCODE_0F);
  }
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    simd_prefix(src, xnoreg, dst, VEX_SIMD_66, true,
                VEX_OPCODE_0F, true, AVX_128bit);
  } else {
    simd_prefix(dst, src, VEX_SIMD_66, true);
  }
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x10, dst, src, VEX_SIMD_F2, true);
  } else {
    emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_nonds_q(0x10, dst, src, VEX_SIMD_F2, true);
  } else {
    emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    simd_prefix_q(src, xnoreg, dst, VEX_SIMD_F2);
  } else {
    simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, false);
  }
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3, true);
}

void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3, true);
}

void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3, false);
  emit_int8(0x11);
  emit_operand(src, dst);
}
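// Illustrative sketch of the selection mentioned above (the real logic lives
// in MacroAssembler::movdbl()/movflt(); this is a simplified paraphrase):
//
//   if (UseXmmLoadAndClearUpper) {
//     movsd(dst, src);   // newer CPUs: also clears the upper half
//   } else {
//     movlpd(dst, src);  // old Opteron: avoids the movsd penalty
//   }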
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
}

void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x59, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers, making it a
  // pain to disassemble code while debugging. If asserts are on, clearly
  // speed is not an issue, so simply use the single-byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-byte nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The remaining encodings are Intel-specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The remaining encodings are AMD-specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    // Generate the first nop for sizes between 21 and 12
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate the second nop for sizes between 11 and 1
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
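// Worked example (descriptive only): on an Intel CPU with UseAddressNop set,
// nop(5) emits the single five-byte instruction 0x0F 0x1F 0x44 0x00 0x00,
// while nop(3) emits 0x66 0x66 0x90, since the three-byte address nop
// "0x0F 0x1F 0x00" is avoided to keep the padding patching-safe.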
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}

void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66,
                  false, (VM_Version::supports_avx512dq() == false));
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66,
                  false, (VM_Version::supports_avx512dq() == false));
}

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector_len,
                 false, (VM_Version::supports_avx512dq() == false));
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_3A, true, vector_len);
  emit_int8(0x00);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}
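// Semantics note (descriptive only): packuswb packs signed 16-bit words from
// both inputs into unsigned bytes with saturation, and vpermq permutes 64-bit
// lanes under imm8 control; e.g. imm8 = 0x4E (01 00 11 10 in 2-bit fields)
// swaps the lower and upper 128-bit halves of a 256-bit register.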
void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_3A,
              false, AVX_128bit, true);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_3A, false, AVX_128bit, true);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
                                      false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
                                      false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
                                      false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x15);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
                                      false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F_3A,
                                      false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, true, VEX_OPCODE_0F,
                                      false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8((unsigned char)0xC4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_HVM;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif

void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38,
                                      false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38,
              false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x00);
  emit_operand(dst, src);
}

void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
  emit_int8(mode & 0xFF);
}

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, false);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2, false,
                        (VM_Version::supports_avx512bw() == false));
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, false, VEX_OPCODE_0F,
              false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
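// Worked example of the shuffle immediate (descriptive only): the mode byte
// holds four 2-bit source-lane selectors, with the lowest field selecting
// dst lane 0. So pshufd(dst, src, 0x1B) (0x1B == 00 01 10 11) reverses the
// four 32-bit lanes, and mode 0x00 broadcasts lane 0 to all four lanes.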
void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}

void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
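// Encoding note (descriptive only): both byte shifts share opcode 0x73; the
// operation is selected by the reg field of the ModRM byte, which is why
// xmm3 (/3 = psrldq) and xmm7 (/7 = pslldq) are passed as the first argument
// above even though they are not real operands.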
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false,
              VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len, true, false);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, true, false);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FVM;
  }
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
}

void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif

void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)0xD0 | encode);
    emit_int8(imm8);
  }
}

void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes at [edi] to the value in rax
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer sized words at [edi] to the value in rax
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif
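// Usage convention for the rep_* string operations above (an illustrative
// sketch; '__' is the usual masm shorthand and 'dst_base'/'word_count' are
// hypothetical): the caller loads the implicit operands first, e.g.
//
//   __ movptr(rdi, dst_base);    // destination
//   __ movptr(rcx, word_count);  // count in pointer-sized words
//   __ xorptr(rax, rax);         // fill value
//   __ rep_stos();               // rep stosq (LP64) / rep stosd (LP32)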
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
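// Note on subl_imm32 (descriptive, an inferred rationale): emitting the full
// 32-bit immediate keeps the instruction length fixed regardless of the
// value, which matters when the immediate may later be patched in place.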
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2);
  } else {
    emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
  }
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
  }
  emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // rax has a one-byte-shorter encoding (0xA9 imm32)
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)0xC0 | encode);
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true);
  } else {
    emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
  }
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true);
  } else {
    emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
  }
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true);
}
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
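// Illustrative RTM sketch (not emitted by this file itself; 'fallback' is a
// hypothetical label and '__' the usual masm shorthand):
//
//   Label fallback;
//   __ xbegin(fallback);   // start transaction; an abort branches to 'fallback'
//   ...                    // transactional region
//   __ xend();             // commit
//   ...
//   __ bind(fallback);     // abort handler; the abort status is in eax
//
// xabort(imm8) can be used inside the region to force an abort with a code.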
// AVX 3-operand scalar floating-point arithmetic instructions

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}
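// Note on the 3-operand forms above (descriptive only): unlike the
// 2-operand SSE encodings, the VEX forms are non-destructive - dst receives
// the result of (nds op src) and neither source is overwritten unless it is
// also dst; e.g. vsubsd(xmm0, xmm1, xmm2) computes xmm0 = xmm1 - xmm2.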
emit_int8((unsigned char)0x9C); 3219 } 3220 3221 #ifndef _LP64 // no 32bit push/pop on amd64 3222 void Assembler::pushl(Address src) { 3223 // Note this will push 64bit on 64bit 3224 InstructionMark im(this); 3225 prefix(src); 3226 emit_int8((unsigned char)0xFF); 3227 emit_operand(rsi, src); 3228 } 3229 #endif 3230 3231 void Assembler::rcll(Register dst, int imm8) { 3232 assert(isShiftCount(imm8), "illegal shift count"); 3233 int encode = prefix_and_encode(dst->encoding()); 3234 if (imm8 == 1) { 3235 emit_int8((unsigned char)0xD1); 3236 emit_int8((unsigned char)(0xD0 | encode)); 3237 } else { 3238 emit_int8((unsigned char)0xC1); 3239 emit_int8((unsigned char)0xD0 | encode); 3240 emit_int8(imm8); 3241 } 3242 } 3243 3244 void Assembler::rdtsc() { 3245 emit_int8((unsigned char)0x0F); 3246 emit_int8((unsigned char)0x31); 3247 } 3248 3249 // copies data from [esi] to [edi] using rcx pointer sized words 3250 // generic 3251 void Assembler::rep_mov() { 3252 emit_int8((unsigned char)0xF3); 3253 // MOVSQ 3254 LP64_ONLY(prefix(REX_W)); 3255 emit_int8((unsigned char)0xA5); 3256 } 3257 3258 // sets rcx bytes with rax, value at [edi] 3259 void Assembler::rep_stosb() { 3260 emit_int8((unsigned char)0xF3); // REP 3261 LP64_ONLY(prefix(REX_W)); 3262 emit_int8((unsigned char)0xAA); // STOSB 3263 } 3264 3265 // sets rcx pointer sized words with rax, value at [edi] 3266 // generic 3267 void Assembler::rep_stos() { 3268 emit_int8((unsigned char)0xF3); // REP 3269 LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD 3270 emit_int8((unsigned char)0xAB); 3271 } 3272 3273 // scans rcx pointer sized words at [edi] for occurance of rax, 3274 // generic 3275 void Assembler::repne_scan() { // repne_scan 3276 emit_int8((unsigned char)0xF2); 3277 // SCASQ 3278 LP64_ONLY(prefix(REX_W)); 3279 emit_int8((unsigned char)0xAF); 3280 } 3281 3282 #ifdef _LP64 3283 // scans rcx 4 byte words at [edi] for occurance of rax, 3284 // generic 3285 void Assembler::repne_scanl() { // repne_scan 3286 emit_int8((unsigned char)0xF2); 3287 // SCASL 3288 emit_int8((unsigned char)0xAF); 3289 } 3290 #endif 3291 3292 void Assembler::ret(int imm16) { 3293 if (imm16 == 0) { 3294 emit_int8((unsigned char)0xC3); 3295 } else { 3296 emit_int8((unsigned char)0xC2); 3297 emit_int16(imm16); 3298 } 3299 } 3300 3301 void Assembler::sahf() { 3302 #ifdef _LP64 3303 // Not supported in 64bit mode 3304 ShouldNotReachHere(); 3305 #endif 3306 emit_int8((unsigned char)0x9E); 3307 } 3308 3309 void Assembler::sarl(Register dst, int imm8) { 3310 int encode = prefix_and_encode(dst->encoding()); 3311 assert(isShiftCount(imm8), "illegal shift count"); 3312 if (imm8 == 1) { 3313 emit_int8((unsigned char)0xD1); 3314 emit_int8((unsigned char)(0xF8 | encode)); 3315 } else { 3316 emit_int8((unsigned char)0xC1); 3317 emit_int8((unsigned char)(0xF8 | encode)); 3318 emit_int8(imm8); 3319 } 3320 } 3321 3322 void Assembler::sarl(Register dst) { 3323 int encode = prefix_and_encode(dst->encoding()); 3324 emit_int8((unsigned char)0xD3); 3325 emit_int8((unsigned char)(0xF8 | encode)); 3326 } 3327 3328 void Assembler::sbbl(Address dst, int32_t imm32) { 3329 InstructionMark im(this); 3330 prefix(dst); 3331 emit_arith_operand(0x81, rbx, dst, imm32); 3332 } 3333 3334 void Assembler::sbbl(Register dst, int32_t imm32) { 3335 prefix(dst); 3336 emit_arith(0x81, 0xD8, dst, imm32); 3337 } 3338 3339 3340 void Assembler::sbbl(Register dst, Address src) { 3341 InstructionMark im(this); 3342 prefix(src, dst); 3343 emit_int8(0x1B); 3344 emit_operand(dst, src); 3345 } 3346 3347 void 
Assembler::sbbl(Register dst, Register src) { 3348 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3349 emit_arith(0x1B, 0xC0, dst, src); 3350 } 3351 3352 void Assembler::setb(Condition cc, Register dst) { 3353 assert(0 <= cc && cc < 16, "illegal cc"); 3354 int encode = prefix_and_encode(dst->encoding(), true); 3355 emit_int8(0x0F); 3356 emit_int8((unsigned char)0x90 | cc); 3357 emit_int8((unsigned char)(0xC0 | encode)); 3358 } 3359 3360 void Assembler::shll(Register dst, int imm8) { 3361 assert(isShiftCount(imm8), "illegal shift count"); 3362 int encode = prefix_and_encode(dst->encoding()); 3363 if (imm8 == 1 ) { 3364 emit_int8((unsigned char)0xD1); 3365 emit_int8((unsigned char)(0xE0 | encode)); 3366 } else { 3367 emit_int8((unsigned char)0xC1); 3368 emit_int8((unsigned char)(0xE0 | encode)); 3369 emit_int8(imm8); 3370 } 3371 } 3372 3373 void Assembler::shll(Register dst) { 3374 int encode = prefix_and_encode(dst->encoding()); 3375 emit_int8((unsigned char)0xD3); 3376 emit_int8((unsigned char)(0xE0 | encode)); 3377 } 3378 3379 void Assembler::shrl(Register dst, int imm8) { 3380 assert(isShiftCount(imm8), "illegal shift count"); 3381 int encode = prefix_and_encode(dst->encoding()); 3382 emit_int8((unsigned char)0xC1); 3383 emit_int8((unsigned char)(0xE8 | encode)); 3384 emit_int8(imm8); 3385 } 3386 3387 void Assembler::shrl(Register dst) { 3388 int encode = prefix_and_encode(dst->encoding()); 3389 emit_int8((unsigned char)0xD3); 3390 emit_int8((unsigned char)(0xE8 | encode)); 3391 } 3392 3393 // copies a single word from [esi] to [edi] 3394 void Assembler::smovl() { 3395 emit_int8((unsigned char)0xA5); 3396 } 3397 3398 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { 3399 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3400 if (VM_Version::supports_evex()) { 3401 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3402 } else { 3403 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3404 } 3405 } 3406 3407 void Assembler::sqrtsd(XMMRegister dst, Address src) { 3408 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3409 if (VM_Version::supports_evex()) { 3410 tuple_type = EVEX_T1S; 3411 input_size_in_bits = EVEX_64bit; 3412 emit_simd_arith_q(0x51, dst, src, VEX_SIMD_F2); 3413 } else { 3414 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2); 3415 } 3416 } 3417 3418 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 3419 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3420 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3421 } 3422 3423 void Assembler::std() { 3424 emit_int8((unsigned char)0xFD); 3425 } 3426 3427 void Assembler::sqrtss(XMMRegister dst, Address src) { 3428 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3429 if (VM_Version::supports_evex()) { 3430 tuple_type = EVEX_T1S; 3431 input_size_in_bits = EVEX_32bit; 3432 } 3433 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3); 3434 } 3435 3436 void Assembler::stmxcsr( Address dst) { 3437 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3438 InstructionMark im(this); 3439 prefix(dst); 3440 emit_int8(0x0F); 3441 emit_int8((unsigned char)0xAE); 3442 emit_operand(as_Register(3), dst); 3443 } 3444 3445 void Assembler::subl(Address dst, int32_t imm32) { 3446 InstructionMark im(this); 3447 prefix(dst); 3448 emit_arith_operand(0x81, rbp, dst, imm32); 3449 } 3450 3451 void Assembler::subl(Address dst, Register src) { 3452 InstructionMark im(this); 3453 prefix(dst, src); 3454 emit_int8(0x29); 3455 emit_operand(src, dst); 3456 } 3457 3458 void Assembler::subl(Register dst, int32_t imm32) { 3459 prefix(dst); 3460 
emit_arith(0x81, 0xE8, dst, imm32); 3461 } 3462 3463 // Force generation of a 4 byte immediate value even if it fits into 8bit 3464 void Assembler::subl_imm32(Register dst, int32_t imm32) { 3465 prefix(dst); 3466 emit_arith_imm32(0x81, 0xE8, dst, imm32); 3467 } 3468 3469 void Assembler::subl(Register dst, Address src) { 3470 InstructionMark im(this); 3471 prefix(src, dst); 3472 emit_int8(0x2B); 3473 emit_operand(dst, src); 3474 } 3475 3476 void Assembler::subl(Register dst, Register src) { 3477 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3478 emit_arith(0x2B, 0xC0, dst, src); 3479 } 3480 3481 void Assembler::subsd(XMMRegister dst, XMMRegister src) { 3482 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3483 if (VM_Version::supports_evex()) { 3484 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3485 } else { 3486 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2); 3487 } 3488 } 3489 3490 void Assembler::subsd(XMMRegister dst, Address src) { 3491 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3492 if (VM_Version::supports_evex()) { 3493 tuple_type = EVEX_T1S; 3494 input_size_in_bits = EVEX_64bit; 3495 } 3496 emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_F2); 3497 } 3498 3499 void Assembler::subss(XMMRegister dst, XMMRegister src) { 3500 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3501 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3502 } 3503 3504 void Assembler::subss(XMMRegister dst, Address src) { 3505 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3506 if (VM_Version::supports_evex()) { 3507 tuple_type = EVEX_T1S; 3508 input_size_in_bits = EVEX_32bit; 3509 } 3510 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3); 3511 } 3512 3513 void Assembler::testb(Register dst, int imm8) { 3514 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 3515 (void) prefix_and_encode(dst->encoding(), true); 3516 emit_arith_b(0xF6, 0xC0, dst, imm8); 3517 } 3518 3519 void Assembler::testl(Register dst, int32_t imm32) { 3520 // not using emit_arith because test 3521 // doesn't support sign-extension of 3522 // 8bit operands 3523 int encode = dst->encoding(); 3524 if (encode == 0) { 3525 emit_int8((unsigned char)0xA9); 3526 } else { 3527 encode = prefix_and_encode(encode); 3528 emit_int8((unsigned char)0xF7); 3529 emit_int8((unsigned char)(0xC0 | encode)); 3530 } 3531 emit_int32(imm32); 3532 } 3533 3534 void Assembler::testl(Register dst, Register src) { 3535 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3536 emit_arith(0x85, 0xC0, dst, src); 3537 } 3538 3539 void Assembler::testl(Register dst, Address src) { 3540 InstructionMark im(this); 3541 prefix(src, dst); 3542 emit_int8((unsigned char)0x85); 3543 emit_operand(dst, src); 3544 } 3545 3546 void Assembler::tzcntl(Register dst, Register src) { 3547 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3548 emit_int8((unsigned char)0xF3); 3549 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3550 emit_int8(0x0F); 3551 emit_int8((unsigned char)0xBC); 3552 emit_int8((unsigned char)0xC0 | encode); 3553 } 3554 3555 void Assembler::tzcntq(Register dst, Register src) { 3556 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 3557 emit_int8((unsigned char)0xF3); 3558 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 3559 emit_int8(0x0F); 3560 emit_int8((unsigned char)0xBC); 3561 emit_int8((unsigned char)(0xC0 | encode)); 3562 } 3563 3564 void Assembler::ucomisd(XMMRegister dst, Address src) { 3565 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 
3566 if (VM_Version::supports_evex()) { 3567 tuple_type = EVEX_T1S; 3568 input_size_in_bits = EVEX_64bit; 3569 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3570 } else { 3571 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3572 } 3573 } 3574 3575 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 3576 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3577 if (VM_Version::supports_evex()) { 3578 emit_simd_arith_nonds_q(0x2E, dst, src, VEX_SIMD_66, true); 3579 } else { 3580 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66); 3581 } 3582 } 3583 3584 void Assembler::ucomiss(XMMRegister dst, Address src) { 3585 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3586 if (VM_Version::supports_evex()) { 3587 tuple_type = EVEX_T1S; 3588 input_size_in_bits = EVEX_32bit; 3589 } 3590 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3591 } 3592 3593 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { 3594 NOT_LP64(assert(VM_Version::supports_sse(), "")); 3595 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE, true); 3596 } 3597 3598 void Assembler::xabort(int8_t imm8) { 3599 emit_int8((unsigned char)0xC6); 3600 emit_int8((unsigned char)0xF8); 3601 emit_int8((unsigned char)(imm8 & 0xFF)); 3602 } 3603 3604 void Assembler::xaddl(Address dst, Register src) { 3605 InstructionMark im(this); 3606 prefix(dst, src); 3607 emit_int8(0x0F); 3608 emit_int8((unsigned char)0xC1); 3609 emit_operand(src, dst); 3610 } 3611 3612 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { 3613 InstructionMark im(this); 3614 relocate(rtype); 3615 if (abort.is_bound()) { 3616 address entry = target(abort); 3617 assert(entry != NULL, "abort entry NULL"); 3618 intptr_t offset = entry - pc(); 3619 emit_int8((unsigned char)0xC7); 3620 emit_int8((unsigned char)0xF8); 3621 emit_int32(offset - 6); // 2 opcode + 4 address 3622 } else { 3623 abort.add_patch_at(code(), locator()); 3624 emit_int8((unsigned char)0xC7); 3625 emit_int8((unsigned char)0xF8); 3626 emit_int32(0); 3627 } 3628 } 3629 3630 void Assembler::xchgl(Register dst, Address src) { // xchg 3631 InstructionMark im(this); 3632 prefix(src, dst); 3633 emit_int8((unsigned char)0x87); 3634 emit_operand(dst, src); 3635 } 3636 3637 void Assembler::xchgl(Register dst, Register src) { 3638 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3639 emit_int8((unsigned char)0x87); 3640 emit_int8((unsigned char)(0xC0 | encode)); 3641 } 3642 3643 void Assembler::xend() { 3644 emit_int8((unsigned char)0x0F); 3645 emit_int8((unsigned char)0x01); 3646 emit_int8((unsigned char)0xD5); 3647 } 3648 3649 void Assembler::xgetbv() { 3650 emit_int8(0x0F); 3651 emit_int8(0x01); 3652 emit_int8((unsigned char)0xD0); 3653 } 3654 3655 void Assembler::xorl(Register dst, int32_t imm32) { 3656 prefix(dst); 3657 emit_arith(0x81, 0xF0, dst, imm32); 3658 } 3659 3660 void Assembler::xorl(Register dst, Address src) { 3661 InstructionMark im(this); 3662 prefix(src, dst); 3663 emit_int8(0x33); 3664 emit_operand(dst, src); 3665 } 3666 3667 void Assembler::xorl(Register dst, Register src) { 3668 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3669 emit_arith(0x33, 0xC0, dst, src); 3670 } 3671 3672 3673 // AVX 3-operands scalar float-point arithmetic instructions 3674 3675 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) { 3676 assert(VM_Version::supports_avx(), ""); 3677 if (VM_Version::supports_evex()) { 3678 tuple_type = EVEX_T1S; 3679 input_size_in_bits = EVEX_64bit; 3680 emit_vex_arith_q(0x58, dst, nds, 
src, VEX_SIMD_F2, AVX_128bit); 3681 } else { 3682 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3683 } 3684 } 3685 3686 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3687 assert(VM_Version::supports_avx(), ""); 3688 if (VM_Version::supports_evex()) { 3689 emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3690 } else { 3691 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3692 } 3693 } 3694 3695 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { 3696 assert(VM_Version::supports_avx(), ""); 3697 if (VM_Version::supports_evex()) { 3698 tuple_type = EVEX_T1S; 3699 input_size_in_bits = EVEX_32bit; 3700 } 3701 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3702 } 3703 3704 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3705 assert(VM_Version::supports_avx(), ""); 3706 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3707 } 3708 3709 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { 3710 assert(VM_Version::supports_avx(), ""); 3711 if (VM_Version::supports_evex()) { 3712 tuple_type = EVEX_T1S; 3713 input_size_in_bits = EVEX_64bit; 3714 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3715 } else { 3716 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3717 } 3718 } 3719 3720 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3721 assert(VM_Version::supports_avx(), ""); 3722 if (VM_Version::supports_evex()) { 3723 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3724 } else { 3725 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3726 } 3727 } 3728 3729 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { 3730 assert(VM_Version::supports_avx(), ""); 3731 if (VM_Version::supports_evex()) { 3732 tuple_type = EVEX_T1S; 3733 input_size_in_bits = EVEX_32bit; 3734 } 3735 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3736 } 3737 3738 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3739 assert(VM_Version::supports_avx(), ""); 3740 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3741 } 3742 3743 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { 3744 assert(VM_Version::supports_avx(), ""); 3745 if (VM_Version::supports_evex()) { 3746 tuple_type = EVEX_T1S; 3747 input_size_in_bits = EVEX_64bit; 3748 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3749 } else { 3750 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3751 } 3752 } 3753 3754 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3755 assert(VM_Version::supports_avx(), ""); 3756 if (VM_Version::supports_evex()) { 3757 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3758 } else { 3759 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, AVX_128bit); 3760 } 3761 } 3762 3763 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { 3764 assert(VM_Version::supports_avx(), ""); 3765 if (VM_Version::supports_evex()) { 3766 tuple_type = EVEX_T1S; 3767 input_size_in_bits = EVEX_32bit; 3768 } 3769 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3770 } 3771 3772 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 3773 assert(VM_Version::supports_avx(), ""); 3774 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, AVX_128bit); 3775 } 3776 3777 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, 
Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  } else {
    emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, AVX_128bit);
  }
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T1S;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, AVX_128bit);
}

//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x58, dst, src, VEX_SIMD_66);
  } else {
    emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
  }
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len);
  }
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x58, dst, nds, src, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector_len);
  }
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector_len);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0x5C, dst, src, VEX_SIMD_66);
  } else {
    emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
  }
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
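  // 0F 5C /r SUBPS: packed single-precision subtract; like the other *ps
  // forms it is encoded with no SIMD prefix byte (VEX_SIMD_NONE).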
emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE); 3875 } 3876 3877 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3878 assert(VM_Version::supports_avx(), ""); 3879 if (VM_Version::supports_evex()) { 3880 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3881 } else { 3882 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3883 } 3884 } 3885 3886 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3887 assert(VM_Version::supports_avx(), ""); 3888 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3889 } 3890 3891 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3892 assert(VM_Version::supports_avx(), ""); 3893 if (VM_Version::supports_evex()) { 3894 tuple_type = EVEX_FV; 3895 input_size_in_bits = EVEX_64bit; 3896 emit_vex_arith_q(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3897 } else { 3898 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector_len); 3899 } 3900 } 3901 3902 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3903 assert(VM_Version::supports_avx(), ""); 3904 if (VM_Version::supports_evex()) { 3905 tuple_type = EVEX_FV; 3906 input_size_in_bits = EVEX_32bit; 3907 } 3908 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector_len); 3909 } 3910 3911 void Assembler::mulpd(XMMRegister dst, XMMRegister src) { 3912 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3913 if (VM_Version::supports_evex()) { 3914 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66); 3915 } else { 3916 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); 3917 } 3918 } 3919 3920 void Assembler::mulpd(XMMRegister dst, Address src) { 3921 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3922 if (VM_Version::supports_evex()) { 3923 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66); 3924 } else { 3925 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); 3926 } 3927 } 3928 3929 void Assembler::mulps(XMMRegister dst, XMMRegister src) { 3930 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3931 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE); 3932 } 3933 3934 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3935 assert(VM_Version::supports_avx(), ""); 3936 if (VM_Version::supports_evex()) { 3937 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3938 } else { 3939 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3940 } 3941 } 3942 3943 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3944 assert(VM_Version::supports_avx(), ""); 3945 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len); 3946 } 3947 3948 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3949 assert(VM_Version::supports_avx(), ""); 3950 if (VM_Version::supports_evex()) { 3951 tuple_type = EVEX_FV; 3952 input_size_in_bits = EVEX_64bit; 3953 emit_vex_arith_q(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3954 } else { 3955 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector_len); 3956 } 3957 } 3958 3959 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3960 assert(VM_Version::supports_avx(), ""); 3961 if (VM_Version::supports_evex()) { 3962 tuple_type = EVEX_FV; 3963 input_size_in_bits = EVEX_32bit; 3964 } 3965 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector_len); 3966 } 3967 3968 void Assembler::divpd(XMMRegister dst, XMMRegister src) { 3969 
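  // 66 0F 5E /r DIVPD: packed double-precision divide; the EVEX.W=1 (_q)
  // form is selected when AVX-512 is available.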
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3970 if (VM_Version::supports_evex()) { 3971 emit_simd_arith_q(0x5E, dst, src, VEX_SIMD_66); 3972 } else { 3973 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66); 3974 } 3975 } 3976 3977 void Assembler::divps(XMMRegister dst, XMMRegister src) { 3978 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3979 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE); 3980 } 3981 3982 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3983 assert(VM_Version::supports_avx(), ""); 3984 if (VM_Version::supports_evex()) { 3985 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3986 } else { 3987 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 3988 } 3989 } 3990 3991 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3992 assert(VM_Version::supports_avx(), ""); 3993 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 3994 } 3995 3996 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3997 assert(VM_Version::supports_avx(), ""); 3998 if (VM_Version::supports_evex()) { 3999 tuple_type = EVEX_FV; 4000 input_size_in_bits = EVEX_64bit; 4001 emit_vex_arith_q(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 4002 } else { 4003 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector_len); 4004 } 4005 } 4006 4007 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4008 assert(VM_Version::supports_avx(), ""); 4009 if (VM_Version::supports_evex()) { 4010 tuple_type = EVEX_FV; 4011 input_size_in_bits = EVEX_32bit; 4012 } 4013 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector_len); 4014 } 4015 4016 void Assembler::andpd(XMMRegister dst, XMMRegister src) { 4017 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4018 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4019 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 4020 } else { 4021 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 4022 } 4023 } 4024 4025 void Assembler::andps(XMMRegister dst, XMMRegister src) { 4026 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4027 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, false, 4028 (VM_Version::supports_avx512dq() == false)); 4029 } 4030 4031 void Assembler::andps(XMMRegister dst, Address src) { 4032 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4033 if (VM_Version::supports_evex()) { 4034 tuple_type = EVEX_FV; 4035 input_size_in_bits = EVEX_32bit; 4036 } 4037 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE, 4038 false, (VM_Version::supports_avx512dq() == false)); 4039 } 4040 4041 void Assembler::andpd(XMMRegister dst, Address src) { 4042 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4043 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4044 tuple_type = EVEX_FV; 4045 input_size_in_bits = EVEX_64bit; 4046 emit_simd_arith_q(0x54, dst, src, VEX_SIMD_66); 4047 } else { 4048 emit_simd_arith(0x54, dst, src, VEX_SIMD_66, false, true); 4049 } 4050 } 4051 4052 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4053 assert(VM_Version::supports_avx(), ""); 4054 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4055 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4056 } else { 4057 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4058 } 4059 } 4060 4061 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 
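  // Note: the EVEX-encoded forms of the packed single/double logical ops
  // (andps/andpd, xorps/xorpd) exist only with AVX512DQ; without it the
  // legacy (VEX/SSE) encoding must be requested, hence the legacy_mode
  // arguments threaded through the emitters below.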
4062 assert(VM_Version::supports_avx(), ""); 4063 bool legacy_mode = (VM_Version::supports_avx512dq() == false); 4064 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, legacy_mode); 4065 } 4066 4067 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4068 assert(VM_Version::supports_avx(), ""); 4069 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4070 tuple_type = EVEX_FV; 4071 input_size_in_bits = EVEX_64bit; 4072 emit_vex_arith_q(0x54, dst, nds, src, VEX_SIMD_66, vector_len); 4073 } else { 4074 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector_len, true); 4075 } 4076 } 4077 4078 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4079 assert(VM_Version::supports_avx(), ""); 4080 if (VM_Version::supports_evex()) { 4081 tuple_type = EVEX_FV; 4082 input_size_in_bits = EVEX_32bit; 4083 } 4084 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, 4085 (VM_Version::supports_avx512dq() == false)); 4086 } 4087 4088 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) { 4089 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4090 if (VM_Version::supports_evex()) { 4091 emit_simd_arith_q(0x15, dst, src, VEX_SIMD_66); 4092 } else { 4093 emit_simd_arith(0x15, dst, src, VEX_SIMD_66); 4094 } 4095 } 4096 4097 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { 4098 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4099 if (VM_Version::supports_evex()) { 4100 emit_simd_arith_q(0x14, dst, src, VEX_SIMD_66); 4101 } else { 4102 emit_simd_arith(0x14, dst, src, VEX_SIMD_66); 4103 } 4104 } 4105 4106 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { 4107 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4108 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4109 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4110 } else { 4111 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4112 } 4113 } 4114 4115 void Assembler::xorps(XMMRegister dst, XMMRegister src) { 4116 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4117 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, 4118 false, (VM_Version::supports_avx512dq() == false)); 4119 } 4120 4121 void Assembler::xorpd(XMMRegister dst, Address src) { 4122 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4123 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4124 tuple_type = EVEX_FV; 4125 input_size_in_bits = EVEX_64bit; 4126 emit_simd_arith_q(0x57, dst, src, VEX_SIMD_66); 4127 } else { 4128 emit_simd_arith(0x57, dst, src, VEX_SIMD_66, false, true); 4129 } 4130 } 4131 4132 void Assembler::xorps(XMMRegister dst, Address src) { 4133 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4134 if (VM_Version::supports_evex()) { 4135 tuple_type = EVEX_FV; 4136 input_size_in_bits = EVEX_32bit; 4137 } 4138 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE, false, 4139 (VM_Version::supports_avx512dq() == false)); 4140 } 4141 4142 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4143 assert(VM_Version::supports_avx(), ""); 4144 if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) { 4145 emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len); 4146 } else { 4147 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector_len, true); 4148 } 4149 } 4150 4151 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4152 assert(VM_Version::supports_avx(), ""); 4153 emit_vex_arith(0x57, dst, nds, src, 
                 VEX_SIMD_NONE, vector_len,
                 (VM_Version::supports_avx512dq() == false));
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex() && VM_Version::supports_avx512dq()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_64bit;
    emit_vex_arith_q(0x57, dst, nds, src, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector_len, true);
  }
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector_len,
                 (VM_Version::supports_avx512dq() == false));
}

// Integer vector arithmetic
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len,
                                     VEX_OPCODE_0F_38, true, false);
  emit_int8(0x01);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len,
                                     VEX_OPCODE_0F_38, true, false);
  emit_int8(0x02);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0xD4, dst, src, VEX_SIMD_66);
  } else {
    emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
  }
}

void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8(0x01);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
  emit_int8(0x02);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len,
(VM_Version::supports_avx512bw() == false)); 4241 } 4242 4243 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4244 assert(UseAVX > 0, "requires some form of AVX"); 4245 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len, 4246 (VM_Version::supports_avx512bw() == false)); 4247 } 4248 4249 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4250 assert(UseAVX > 0, "requires some form of AVX"); 4251 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4252 } 4253 4254 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4255 assert(UseAVX > 0, "requires some form of AVX"); 4256 if (VM_Version::supports_evex()) { 4257 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4258 } else { 4259 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4260 } 4261 } 4262 4263 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4264 assert(UseAVX > 0, "requires some form of AVX"); 4265 if (VM_Version::supports_evex()) { 4266 tuple_type = EVEX_FVM; 4267 } 4268 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector_len); 4269 } 4270 4271 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4272 assert(UseAVX > 0, "requires some form of AVX"); 4273 if (VM_Version::supports_evex()) { 4274 tuple_type = EVEX_FVM; 4275 } 4276 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector_len); 4277 } 4278 4279 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4280 assert(UseAVX > 0, "requires some form of AVX"); 4281 if (VM_Version::supports_evex()) { 4282 tuple_type = EVEX_FV; 4283 input_size_in_bits = EVEX_32bit; 4284 } 4285 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector_len); 4286 } 4287 4288 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4289 assert(UseAVX > 0, "requires some form of AVX"); 4290 if (VM_Version::supports_evex()) { 4291 tuple_type = EVEX_FV; 4292 input_size_in_bits = EVEX_64bit; 4293 emit_vex_arith_q(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4294 } else { 4295 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector_len); 4296 } 4297 } 4298 4299 void Assembler::psubb(XMMRegister dst, XMMRegister src) { 4300 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4301 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66); 4302 } 4303 4304 void Assembler::psubw(XMMRegister dst, XMMRegister src) { 4305 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4306 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66); 4307 } 4308 4309 void Assembler::psubd(XMMRegister dst, XMMRegister src) { 4310 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4311 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66); 4312 } 4313 4314 void Assembler::psubq(XMMRegister dst, XMMRegister src) { 4315 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4316 if (VM_Version::supports_evex()) { 4317 emit_simd_arith_q(0xFB, dst, src, VEX_SIMD_66); 4318 } else { 4319 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66); 4320 } 4321 } 4322 4323 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4324 assert(UseAVX > 0, "requires some form of AVX"); 4325 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector_len, 4326 (VM_Version::supports_avx512bw() == false)); 4327 } 4328 4329 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4330 assert(UseAVX > 0, "requires some form of 
AVX"); 4331 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4332 (VM_Version::supports_avx512bw() == false)); 4333 } 4334 4335 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4336 assert(UseAVX > 0, "requires some form of AVX"); 4337 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4338 } 4339 4340 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4341 assert(UseAVX > 0, "requires some form of AVX"); 4342 if (VM_Version::supports_evex()) { 4343 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4344 } else { 4345 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4346 } 4347 } 4348 4349 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4350 assert(UseAVX > 0, "requires some form of AVX"); 4351 if (VM_Version::supports_evex()) { 4352 tuple_type = EVEX_FVM; 4353 } 4354 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector_len, 4355 (VM_Version::supports_avx512bw() == false)); 4356 } 4357 4358 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4359 assert(UseAVX > 0, "requires some form of AVX"); 4360 if (VM_Version::supports_evex()) { 4361 tuple_type = EVEX_FVM; 4362 } 4363 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector_len, 4364 (VM_Version::supports_avx512bw() == false)); 4365 } 4366 4367 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4368 assert(UseAVX > 0, "requires some form of AVX"); 4369 if (VM_Version::supports_evex()) { 4370 tuple_type = EVEX_FV; 4371 input_size_in_bits = EVEX_32bit; 4372 } 4373 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector_len); 4374 } 4375 4376 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4377 assert(UseAVX > 0, "requires some form of AVX"); 4378 if (VM_Version::supports_evex()) { 4379 tuple_type = EVEX_FV; 4380 input_size_in_bits = EVEX_64bit; 4381 emit_vex_arith_q(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4382 } else { 4383 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector_len); 4384 } 4385 } 4386 4387 void Assembler::pmullw(XMMRegister dst, XMMRegister src) { 4388 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4389 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66, 4390 (VM_Version::supports_avx512bw() == false)); 4391 } 4392 4393 void Assembler::pmulld(XMMRegister dst, XMMRegister src) { 4394 assert(VM_Version::supports_sse4_1(), ""); 4395 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, 4396 false, VEX_OPCODE_0F_38); 4397 emit_int8(0x40); 4398 emit_int8((unsigned char)(0xC0 | encode)); 4399 } 4400 4401 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4402 assert(UseAVX > 0, "requires some form of AVX"); 4403 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len, 4404 (VM_Version::supports_avx512bw() == false)); 4405 } 4406 4407 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4408 assert(UseAVX > 0, "requires some form of AVX"); 4409 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 4410 vector_len, VEX_OPCODE_0F_38); 4411 emit_int8(0x40); 4412 emit_int8((unsigned char)(0xC0 | encode)); 4413 } 4414 4415 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4416 assert(UseAVX > 2, "requires some form of AVX"); 4417 int src_enc = src->encoding(); 4418 int dst_enc = dst->encoding(); 
4419 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 4420 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, 4421 VEX_OPCODE_0F_38, true, vector_len, false, false); 4422 emit_int8(0x40); 4423 emit_int8((unsigned char)(0xC0 | encode)); 4424 } 4425 4426 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4427 assert(UseAVX > 0, "requires some form of AVX"); 4428 if (VM_Version::supports_evex()) { 4429 tuple_type = EVEX_FVM; 4430 } 4431 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector_len); 4432 } 4433 4434 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4435 assert(UseAVX > 0, "requires some form of AVX"); 4436 if (VM_Version::supports_evex()) { 4437 tuple_type = EVEX_FV; 4438 input_size_in_bits = EVEX_32bit; 4439 } 4440 InstructionMark im(this); 4441 int dst_enc = dst->encoding(); 4442 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 4443 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, 4444 VEX_OPCODE_0F_38, false, vector_len); 4445 emit_int8(0x40); 4446 emit_operand(dst, src); 4447 } 4448 4449 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4450 assert(UseAVX > 0, "requires some form of AVX"); 4451 if (VM_Version::supports_evex()) { 4452 tuple_type = EVEX_FV; 4453 input_size_in_bits = EVEX_64bit; 4454 } 4455 InstructionMark im(this); 4456 int dst_enc = dst->encoding(); 4457 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 4458 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 4459 emit_int8(0x40); 4460 emit_operand(dst, src); 4461 } 4462 4463 // Shift packed integers left by specified number of bits. 4464 void Assembler::psllw(XMMRegister dst, int shift) { 4465 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4466 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4467 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4468 false, AVX_128bit, (VM_Version::supports_avx512bw() == false)); 4469 emit_int8(0x71); 4470 emit_int8((unsigned char)(0xC0 | encode)); 4471 emit_int8(shift & 0xFF); 4472 } 4473 4474 void Assembler::pslld(XMMRegister dst, int shift) { 4475 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4476 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4477 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false); 4478 emit_int8(0x72); 4479 emit_int8((unsigned char)(0xC0 | encode)); 4480 emit_int8(shift & 0xFF); 4481 } 4482 4483 void Assembler::psllq(XMMRegister dst, int shift) { 4484 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4485 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4486 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true); 4487 emit_int8(0x73); 4488 emit_int8((unsigned char)(0xC0 | encode)); 4489 emit_int8(shift & 0xFF); 4490 } 4491 4492 void Assembler::psllw(XMMRegister dst, XMMRegister shift) { 4493 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4494 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66, false, 4495 (VM_Version::supports_avx512bw() == false)); 4496 } 4497 4498 void Assembler::pslld(XMMRegister dst, XMMRegister shift) { 4499 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4500 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66); 4501 } 4502 4503 void Assembler::psllq(XMMRegister dst, XMMRegister shift) { 4504 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4505 if (VM_Version::supports_evex()) { 4506 emit_simd_arith_q(0xF3, dst, shift, VEX_SIMD_66); 4507 } else { 4508 
emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66); 4509 } 4510 } 4511 4512 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4513 assert(UseAVX > 0, "requires some form of AVX"); 4514 // XMM6 is for /6 encoding: 66 0F 71 /6 ib 4515 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector_len, 4516 (VM_Version::supports_avx512bw() == false)); 4517 emit_int8(shift & 0xFF); 4518 } 4519 4520 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4521 assert(UseAVX > 0, "requires some form of AVX"); 4522 // XMM6 is for /6 encoding: 66 0F 72 /6 ib 4523 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector_len); 4524 emit_int8(shift & 0xFF); 4525 } 4526 4527 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4528 assert(UseAVX > 0, "requires some form of AVX"); 4529 // XMM6 is for /6 encoding: 66 0F 73 /6 ib 4530 if (VM_Version::supports_evex()) { 4531 emit_vex_arith_q(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4532 } else { 4533 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector_len); 4534 } 4535 emit_int8(shift & 0xFF); 4536 } 4537 4538 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4539 assert(UseAVX > 0, "requires some form of AVX"); 4540 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector_len, 4541 (VM_Version::supports_avx512bw() == false)); 4542 } 4543 4544 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4545 assert(UseAVX > 0, "requires some form of AVX"); 4546 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector_len); 4547 } 4548 4549 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { 4550 assert(UseAVX > 0, "requires some form of AVX"); 4551 if (VM_Version::supports_evex()) { 4552 emit_vex_arith_q(0xF3, dst, src, shift, VEX_SIMD_66, vector_len); 4553 } else { 4554 emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector_len); 4555 } 4556 } 4557 4558 // Shift packed integers logically right by specified number of bits. 4559 void Assembler::psrlw(XMMRegister dst, int shift) { 4560 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4561 // XMM2 is for /2 encoding: 66 0F 71 /2 ib 4562 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, 4563 (VM_Version::supports_avx512bw() == false)); 4564 emit_int8(0x71); 4565 emit_int8((unsigned char)(0xC0 | encode)); 4566 emit_int8(shift & 0xFF); 4567 } 4568 4569 void Assembler::psrld(XMMRegister dst, int shift) { 4570 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4571 // XMM2 is for /2 encoding: 66 0F 72 /2 ib 4572 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false); 4573 emit_int8(0x72); 4574 emit_int8((unsigned char)(0xC0 | encode)); 4575 emit_int8(shift & 0xFF); 4576 } 4577 4578 void Assembler::psrlq(XMMRegister dst, int shift) { 4579 // Do not confuse it with psrldq SSE2 instruction which 4580 // shifts 128 bit value in xmm register by number of bytes. 
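  // For example, psrlq(dst, 8) shifts each 64-bit lane right by 8 bits,
  // while psrldq(dst, 8) moves the entire 128-bit register right by 8 bytes.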
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = 0;
  if (VM_Version::supports_evex() && VM_Version::supports_avx512bw()) {
    encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false);
  } else {
    encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F, true);
  }
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66, false,
                  (VM_Version::supports_avx512bw() == false));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0xD3, dst, shift, VEX_SIMD_66);
  } else {
    emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
  }
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector_len,
                 (VM_Version::supports_avx512bw() == false));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector_len);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector_len);
  }
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector_len,
                 (VM_Version::supports_avx512bw() == false));
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector_len);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (VM_Version::supports_evex()) {
    emit_vex_arith_q(0xD3, dst, src, shift, VEX_SIMD_66, vector_len);
  } else {
    emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector_len);
  }
}

// Shift packed integers arithmetically right by specified number of bits.
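// Note: SSE/AVX only provide word and dword arithmetic right shifts; the
// quadword form (vpsraq) exists only under AVX-512, so no psraq is emitted here.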
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false, VEX_OPCODE_0F,
                                      (VM_Version::supports_avx512bw() == false));
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, false);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66,
                  (VM_Version::supports_avx512bw() == false));
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector_len,
                 (VM_Version::supports_avx512bw() == false));
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector_len);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector_len,
                 (VM_Version::supports_avx512bw() == false));
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len);
}


// AND packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}

void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  if (VM_Version::supports_evex()) {
    emit_simd_arith_q(0xDF, dst, src, VEX_SIMD_66);
  } else {
    emit_simd_arith(0xDF, dst, src, VEX_SIMD_66);
  }
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_FV;
    input_size_in_bits = EVEX_32bit;
  }
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
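  // 66 0F EB /r POR: bitwise OR of the full 128-bit register contents.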
4749 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66); 4750 } 4751 4752 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4753 assert(UseAVX > 0, "requires some form of AVX"); 4754 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4755 } 4756 4757 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4758 assert(UseAVX > 0, "requires some form of AVX"); 4759 if (VM_Version::supports_evex()) { 4760 tuple_type = EVEX_FV; 4761 input_size_in_bits = EVEX_32bit; 4762 } 4763 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector_len); 4764 } 4765 4766 void Assembler::pxor(XMMRegister dst, XMMRegister src) { 4767 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4768 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66); 4769 } 4770 4771 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4772 assert(UseAVX > 0, "requires some form of AVX"); 4773 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4774 } 4775 4776 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 4777 assert(UseAVX > 0, "requires some form of AVX"); 4778 if (VM_Version::supports_evex()) { 4779 tuple_type = EVEX_FV; 4780 input_size_in_bits = EVEX_32bit; 4781 } 4782 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector_len); 4783 } 4784 4785 4786 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4787 assert(VM_Version::supports_avx(), ""); 4788 int vector_len = AVX_256bit; 4789 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A); 4790 emit_int8(0x18); 4791 emit_int8((unsigned char)(0xC0 | encode)); 4792 // 0x00 - insert into lower 128 bits 4793 // 0x01 - insert into upper 128 bits 4794 emit_int8(0x01); 4795 } 4796 4797 void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4798 assert(VM_Version::supports_evex(), ""); 4799 int vector_len = AVX_512bit; 4800 int src_enc = src->encoding(); 4801 int dst_enc = dst->encoding(); 4802 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66,
                                     VEX_OPCODE_0F_3A, true, vector_len, false, false);
  emit_int8(0x1A);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(0x01);
}

void Assembler::vinsertf64x4h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T4;
    input_size_in_bits = EVEX_64bit;
  }
  InstructionMark im(this);
  int vector_len = AVX_512bit;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector_len);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x01 - insert into upper 256 bits
  emit_int8(0x01);
}

void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T4;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T4;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ?
nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     VM_Version::supports_avx512dq(), vector_len, false, false);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T4;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector_len, VEX_OPCODE_0F_3A);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  if (VM_Version::supports_evex()) {
    tuple_type = EVEX_T4;
    input_size_in_bits = EVEX_32bit;
  }
  InstructionMark im(this);
  int vector_len = AVX_256bit;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector_len);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     true, vector_len, false, false);
  emit_int8(0x3B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x01 - extract from upper 256 bits
  emit_int8(0x01);
}

void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     VM_Version::supports_avx512dq(), vector_len, false, false);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}

void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     VM_Version::supports_avx512dq(), vector_len, false, false);
  emit_int8(0x1B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x01 - extract from upper 256 bits
  emit_int8(0x01);
}

void Assembler::vextractf64x4h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T4;
  input_size_in_bits = EVEX_64bit;
  InstructionMark im(this);
  int vector_len = AVX_512bit;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
             VM_Version::supports_avx512dq(), vector_len);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x01 - extract from upper 256 bits
  emit_int8(0x01);
}

void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66,
                                     VEX_OPCODE_0F_3A, false, vector_len, false, false);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}

void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  int vector_len = AVX_512bit;
  int src_enc = src->encoding();
  int dst_enc = dst->encoding();
  int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
                                     VM_Version::supports_avx512dq(), vector_len, false, false);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}

// duplicate 4-byte integer data from src into 8 locations in dest
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  int vector_len = AVX_256bit;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x78);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_8bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x78);
  emit_operand(dst, src);
}

// duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst, xnoreg,
src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x79);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_16bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x79);
  emit_operand(dst, src);
}

// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_38, false);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_32bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_64bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL
void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  tuple_type = EVEX_T1S;
  input_size_in_bits = EVEX_32bit;
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
  emit_int8(0x18);
  emit_operand(dst, src);
}

// duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL
void
Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { 5157 assert(VM_Version::supports_evex(), ""); 5158 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5159 VEX_OPCODE_0F_38, true, vector_len, false, false); 5160 emit_int8(0x19); 5161 emit_int8((unsigned char)(0xC0 | encode)); 5162 } 5163 5164 void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) { 5165 assert(VM_Version::supports_evex(), ""); 5166 tuple_type = EVEX_T1S; 5167 input_size_in_bits = EVEX_64bit; 5168 InstructionMark im(this); 5169 assert(dst != xnoreg, "sanity"); 5170 int dst_enc = dst->encoding(); 5171 // swap src<->dst for encoding 5172 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len); 5173 emit_int8(0x19); 5174 emit_operand(dst, src); 5175 } 5176 5177 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL 5178 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { 5179 assert(VM_Version::supports_evex(), ""); 5180 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5181 VEX_OPCODE_0F_38, false, vector_len, false, false); 5182 emit_int8(0x7A); 5183 emit_int8((unsigned char)(0xC0 | encode)); 5184 } 5185 5186 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL 5187 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { 5188 assert(VM_Version::supports_evex(), ""); 5189 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5190 VEX_OPCODE_0F_38, false, vector_len, false, false); 5191 emit_int8(0x7B); 5192 emit_int8((unsigned char)(0xC0 | encode)); 5193 } 5194 5195 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5196 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { 5197 assert(VM_Version::supports_evex(), ""); 5198 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5199 VEX_OPCODE_0F_38, false, vector_len, false, false); 5200 emit_int8(0x7C); 5201 emit_int8((unsigned char)(0xC0 | encode)); 5202 } 5203 5204 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL 5205 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { 5206 assert(VM_Version::supports_evex(), ""); 5207 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, 5208 VEX_OPCODE_0F_38, true, vector_len, false, false); 5209 emit_int8(0x7C); 5210 emit_int8((unsigned char)(0xC0 | encode)); 5211 } 5212 5213 // Carry-Less Multiplication Quadword 5214 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { 5215 assert(VM_Version::supports_clmul(), ""); 5216 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false, 5217 VEX_OPCODE_0F_3A, false, AVX_128bit, true); 5218 emit_int8(0x44); 5219 emit_int8((unsigned char)(0xC0 | encode)); 5220 emit_int8((unsigned char)mask); 5221 } 5222 5223 // Carry-Less Multiplication Quadword 5224 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 5225 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 5226 int vector_len = AVX_128bit; 5227 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, 5228 vector_len, VEX_OPCODE_0F_3A, true); 5229 emit_int8(0x44); 5230 emit_int8((unsigned char)(0xC0 | encode)); 5231 emit_int8((unsigned 
char)mask); 5232 } 5233 5234 void Assembler::vzeroupper() { 5235 assert(VM_Version::supports_avx(), ""); 5236 if (UseAVX < 3) 5237 { 5238 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE); 5239 emit_int8(0x77); 5240 } 5241 } 5242 5243 5244 #ifndef _LP64 5245 // 32bit only pieces of the assembler 5246 5247 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { 5248 // NO PREFIX AS NEVER 64BIT 5249 InstructionMark im(this); 5250 emit_int8((unsigned char)0x81); 5251 emit_int8((unsigned char)(0xF8 | src1->encoding())); 5252 emit_data(imm32, rspec, 0); 5253 } 5254 5255 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { 5256 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs 5257 InstructionMark im(this); 5258 emit_int8((unsigned char)0x81); 5259 emit_operand(rdi, src1); 5260 emit_data(imm32, rspec, 0); 5261 } 5262 5263 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, 5264 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded 5265 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. 5266 void Assembler::cmpxchg8(Address adr) { 5267 InstructionMark im(this); 5268 emit_int8(0x0F); 5269 emit_int8((unsigned char)0xC7); 5270 emit_operand(rcx, adr); 5271 } 5272 5273 void Assembler::decl(Register dst) { 5274 // Don't use it directly. Use MacroAssembler::decrementl() instead. 5275 emit_int8(0x48 | dst->encoding()); 5276 } 5277 5278 #endif // _LP64 5279 5280 // 64bit typically doesn't use the x87 but needs to for the trig funcs 5281 5282 void Assembler::fabs() { 5283 emit_int8((unsigned char)0xD9); 5284 emit_int8((unsigned char)0xE1); 5285 } 5286 5287 void Assembler::fadd(int i) { 5288 emit_farith(0xD8, 0xC0, i); 5289 } 5290 5291 void Assembler::fadd_d(Address src) { 5292 InstructionMark im(this); 5293 emit_int8((unsigned char)0xDC); 5294 emit_operand32(rax, src); 5295 } 5296 5297 void Assembler::fadd_s(Address src) { 5298 InstructionMark im(this); 5299 emit_int8((unsigned char)0xD8); 5300 emit_operand32(rax, src); 5301 } 5302 5303 void Assembler::fadda(int i) { 5304 emit_farith(0xDC, 0xC0, i); 5305 } 5306 5307 void Assembler::faddp(int i) { 5308 emit_farith(0xDE, 0xC0, i); 5309 } 5310 5311 void Assembler::fchs() { 5312 emit_int8((unsigned char)0xD9); 5313 emit_int8((unsigned char)0xE0); 5314 } 5315 5316 void Assembler::fcom(int i) { 5317 emit_farith(0xD8, 0xD0, i); 5318 } 5319 5320 void Assembler::fcomp(int i) { 5321 emit_farith(0xD8, 0xD8, i); 5322 } 5323 5324 void Assembler::fcomp_d(Address src) { 5325 InstructionMark im(this); 5326 emit_int8((unsigned char)0xDC); 5327 emit_operand32(rbx, src); 5328 } 5329 5330 void Assembler::fcomp_s(Address src) { 5331 InstructionMark im(this); 5332 emit_int8((unsigned char)0xD8); 5333 emit_operand32(rbx, src); 5334 } 5335 5336 void Assembler::fcompp() { 5337 emit_int8((unsigned char)0xDE); 5338 emit_int8((unsigned char)0xD9); 5339 } 5340 5341 void Assembler::fcos() { 5342 emit_int8((unsigned char)0xD9); 5343 emit_int8((unsigned char)0xFF); 5344 } 5345 5346 void Assembler::fdecstp() { 5347 emit_int8((unsigned char)0xD9); 5348 emit_int8((unsigned char)0xF6); 5349 } 5350 5351 void Assembler::fdiv(int i) { 5352 emit_farith(0xD8, 0xF0, i); 5353 } 5354 5355 void Assembler::fdiv_d(Address src) { 5356 InstructionMark im(this); 5357 emit_int8((unsigned char)0xDC); 5358 emit_operand32(rsi, src); 5359 } 5360 5361 void Assembler::fdiv_s(Address src) { 5362 
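// Note on the x87 memory forms in this block: the Register passed to
// emit_operand32() is not a real operand; it only supplies the ModRM
// reg-field digit of the opcode (rax = /0, rcx = /1, ... rdi = /7).
// Here rsi encodes /6, making 0xD8 /6 the FDIV m32fp form. For example,
// fdiv_s(Address(rsp, 8)) emits the bytes D8 74 24 08.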
InstructionMark im(this); 5363 emit_int8((unsigned char)0xD8); 5364 emit_operand32(rsi, src); 5365 } 5366 5367 void Assembler::fdiva(int i) { 5368 emit_farith(0xDC, 0xF8, i); 5369 } 5370 5371 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994) 5372 // is erroneous for some of the floating-point instructions below. 5373 5374 void Assembler::fdivp(int i) { 5375 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) 5376 } 5377 5378 void Assembler::fdivr(int i) { 5379 emit_farith(0xD8, 0xF8, i); 5380 } 5381 5382 void Assembler::fdivr_d(Address src) { 5383 InstructionMark im(this); 5384 emit_int8((unsigned char)0xDC); 5385 emit_operand32(rdi, src); 5386 } 5387 5388 void Assembler::fdivr_s(Address src) { 5389 InstructionMark im(this); 5390 emit_int8((unsigned char)0xD8); 5391 emit_operand32(rdi, src); 5392 } 5393 5394 void Assembler::fdivra(int i) { 5395 emit_farith(0xDC, 0xF0, i); 5396 } 5397 5398 void Assembler::fdivrp(int i) { 5399 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) 5400 } 5401 5402 void Assembler::ffree(int i) { 5403 emit_farith(0xDD, 0xC0, i); 5404 } 5405 5406 void Assembler::fild_d(Address adr) { 5407 InstructionMark im(this); 5408 emit_int8((unsigned char)0xDF); 5409 emit_operand32(rbp, adr); 5410 } 5411 5412 void Assembler::fild_s(Address adr) { 5413 InstructionMark im(this); 5414 emit_int8((unsigned char)0xDB); 5415 emit_operand32(rax, adr); 5416 } 5417 5418 void Assembler::fincstp() { 5419 emit_int8((unsigned char)0xD9); 5420 emit_int8((unsigned char)0xF7); 5421 } 5422 5423 void Assembler::finit() { 5424 emit_int8((unsigned char)0x9B); 5425 emit_int8((unsigned char)0xDB); 5426 emit_int8((unsigned char)0xE3); 5427 } 5428 5429 void Assembler::fist_s(Address adr) { 5430 InstructionMark im(this); 5431 emit_int8((unsigned char)0xDB); 5432 emit_operand32(rdx, adr); 5433 } 5434 5435 void Assembler::fistp_d(Address adr) { 5436 InstructionMark im(this); 5437 emit_int8((unsigned char)0xDF); 5438 emit_operand32(rdi, adr); 5439 } 5440 5441 void Assembler::fistp_s(Address adr) { 5442 InstructionMark im(this); 5443 emit_int8((unsigned char)0xDB); 5444 emit_operand32(rbx, adr); 5445 } 5446 5447 void Assembler::fld1() { 5448 emit_int8((unsigned char)0xD9); 5449 emit_int8((unsigned char)0xE8); 5450 } 5451 5452 void Assembler::fld_d(Address adr) { 5453 InstructionMark im(this); 5454 emit_int8((unsigned char)0xDD); 5455 emit_operand32(rax, adr); 5456 } 5457 5458 void Assembler::fld_s(Address adr) { 5459 InstructionMark im(this); 5460 emit_int8((unsigned char)0xD9); 5461 emit_operand32(rax, adr); 5462 } 5463 5464 5465 void Assembler::fld_s(int index) { 5466 emit_farith(0xD9, 0xC0, index); 5467 } 5468 5469 void Assembler::fld_x(Address adr) { 5470 InstructionMark im(this); 5471 emit_int8((unsigned char)0xDB); 5472 emit_operand32(rbp, adr); 5473 } 5474 5475 void Assembler::fldcw(Address src) { 5476 InstructionMark im(this); 5477 emit_int8((unsigned char)0xD9); 5478 emit_operand32(rbp, src); 5479 } 5480 5481 void Assembler::fldenv(Address src) { 5482 InstructionMark im(this); 5483 emit_int8((unsigned char)0xD9); 5484 emit_operand32(rsp, src); 5485 } 5486 5487 void Assembler::fldlg2() { 5488 emit_int8((unsigned char)0xD9); 5489 emit_int8((unsigned char)0xEC); 5490 } 5491 5492 void Assembler::fldln2() { 5493 emit_int8((unsigned char)0xD9); 5494 emit_int8((unsigned char)0xED); 5495 } 5496 5497 void Assembler::fldz() { 5498 emit_int8((unsigned char)0xD9); 5499 emit_int8((unsigned char)0xEE); 5500 } 5501 5502 void 
Assembler::flog() { 5503 fldln2(); 5504 fxch(); 5505 fyl2x(); 5506 } 5507 5508 void Assembler::flog10() { 5509 fldlg2(); 5510 fxch(); 5511 fyl2x(); 5512 } 5513 5514 void Assembler::fmul(int i) { 5515 emit_farith(0xD8, 0xC8, i); 5516 } 5517 5518 void Assembler::fmul_d(Address src) { 5519 InstructionMark im(this); 5520 emit_int8((unsigned char)0xDC); 5521 emit_operand32(rcx, src); 5522 } 5523 5524 void Assembler::fmul_s(Address src) { 5525 InstructionMark im(this); 5526 emit_int8((unsigned char)0xD8); 5527 emit_operand32(rcx, src); 5528 } 5529 5530 void Assembler::fmula(int i) { 5531 emit_farith(0xDC, 0xC8, i); 5532 } 5533 5534 void Assembler::fmulp(int i) { 5535 emit_farith(0xDE, 0xC8, i); 5536 } 5537 5538 void Assembler::fnsave(Address dst) { 5539 InstructionMark im(this); 5540 emit_int8((unsigned char)0xDD); 5541 emit_operand32(rsi, dst); 5542 } 5543 5544 void Assembler::fnstcw(Address src) { 5545 InstructionMark im(this); 5546 emit_int8((unsigned char)0x9B); 5547 emit_int8((unsigned char)0xD9); 5548 emit_operand32(rdi, src); 5549 } 5550 5551 void Assembler::fnstsw_ax() { 5552 emit_int8((unsigned char)0xDF); 5553 emit_int8((unsigned char)0xE0); 5554 } 5555 5556 void Assembler::fprem() { 5557 emit_int8((unsigned char)0xD9); 5558 emit_int8((unsigned char)0xF8); 5559 } 5560 5561 void Assembler::fprem1() { 5562 emit_int8((unsigned char)0xD9); 5563 emit_int8((unsigned char)0xF5); 5564 } 5565 5566 void Assembler::frstor(Address src) { 5567 InstructionMark im(this); 5568 emit_int8((unsigned char)0xDD); 5569 emit_operand32(rsp, src); 5570 } 5571 5572 void Assembler::fsin() { 5573 emit_int8((unsigned char)0xD9); 5574 emit_int8((unsigned char)0xFE); 5575 } 5576 5577 void Assembler::fsqrt() { 5578 emit_int8((unsigned char)0xD9); 5579 emit_int8((unsigned char)0xFA); 5580 } 5581 5582 void Assembler::fst_d(Address adr) { 5583 InstructionMark im(this); 5584 emit_int8((unsigned char)0xDD); 5585 emit_operand32(rdx, adr); 5586 } 5587 5588 void Assembler::fst_s(Address adr) { 5589 InstructionMark im(this); 5590 emit_int8((unsigned char)0xD9); 5591 emit_operand32(rdx, adr); 5592 } 5593 5594 void Assembler::fstp_d(Address adr) { 5595 InstructionMark im(this); 5596 emit_int8((unsigned char)0xDD); 5597 emit_operand32(rbx, adr); 5598 } 5599 5600 void Assembler::fstp_d(int index) { 5601 emit_farith(0xDD, 0xD8, index); 5602 } 5603 5604 void Assembler::fstp_s(Address adr) { 5605 InstructionMark im(this); 5606 emit_int8((unsigned char)0xD9); 5607 emit_operand32(rbx, adr); 5608 } 5609 5610 void Assembler::fstp_x(Address adr) { 5611 InstructionMark im(this); 5612 emit_int8((unsigned char)0xDB); 5613 emit_operand32(rdi, adr); 5614 } 5615 5616 void Assembler::fsub(int i) { 5617 emit_farith(0xD8, 0xE0, i); 5618 } 5619 5620 void Assembler::fsub_d(Address src) { 5621 InstructionMark im(this); 5622 emit_int8((unsigned char)0xDC); 5623 emit_operand32(rsp, src); 5624 } 5625 5626 void Assembler::fsub_s(Address src) { 5627 InstructionMark im(this); 5628 emit_int8((unsigned char)0xD8); 5629 emit_operand32(rsp, src); 5630 } 5631 5632 void Assembler::fsuba(int i) { 5633 emit_farith(0xDC, 0xE8, i); 5634 } 5635 5636 void Assembler::fsubp(int i) { 5637 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) 5638 } 5639 5640 void Assembler::fsubr(int i) { 5641 emit_farith(0xD8, 0xE8, i); 5642 } 5643 5644 void Assembler::fsubr_d(Address src) { 5645 InstructionMark im(this); 5646 emit_int8((unsigned char)0xDC); 5647 emit_operand32(rbp, src); 5648 } 5649 5650 void Assembler::fsubr_s(Address src) { 5651 
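// Reverse subtract from memory: 0xD8 /5 (rbp supplies the /5 digit)
// computes ST(0) = m32fp - ST(0), whereas fsub_s (0xD8 /4) computes
// ST(0) = ST(0) - m32fp.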
InstructionMark im(this); 5652 emit_int8((unsigned char)0xD8); 5653 emit_operand32(rbp, src); 5654 } 5655 5656 void Assembler::fsubra(int i) { 5657 emit_farith(0xDC, 0xE0, i); 5658 } 5659 5660 void Assembler::fsubrp(int i) { 5661 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) 5662 } 5663 5664 void Assembler::ftan() { 5665 emit_int8((unsigned char)0xD9); 5666 emit_int8((unsigned char)0xF2); 5667 emit_int8((unsigned char)0xDD); 5668 emit_int8((unsigned char)0xD8); 5669 } 5670 5671 void Assembler::ftst() { 5672 emit_int8((unsigned char)0xD9); 5673 emit_int8((unsigned char)0xE4); 5674 } 5675 5676 void Assembler::fucomi(int i) { 5677 // make sure the instruction is supported (introduced for P6, together with cmov) 5678 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 5679 emit_farith(0xDB, 0xE8, i); 5680 } 5681 5682 void Assembler::fucomip(int i) { 5683 // make sure the instruction is supported (introduced for P6, together with cmov) 5684 guarantee(VM_Version::supports_cmov(), "illegal instruction"); 5685 emit_farith(0xDF, 0xE8, i); 5686 } 5687 5688 void Assembler::fwait() { 5689 emit_int8((unsigned char)0x9B); 5690 } 5691 5692 void Assembler::fxch(int i) { 5693 emit_farith(0xD9, 0xC8, i); 5694 } 5695 5696 void Assembler::fyl2x() { 5697 emit_int8((unsigned char)0xD9); 5698 emit_int8((unsigned char)0xF1); 5699 } 5700 5701 void Assembler::frndint() { 5702 emit_int8((unsigned char)0xD9); 5703 emit_int8((unsigned char)0xFC); 5704 } 5705 5706 void Assembler::f2xm1() { 5707 emit_int8((unsigned char)0xD9); 5708 emit_int8((unsigned char)0xF0); 5709 } 5710 5711 void Assembler::fldl2e() { 5712 emit_int8((unsigned char)0xD9); 5713 emit_int8((unsigned char)0xEA); 5714 } 5715 5716 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. 5717 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 }; 5718 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding. 5719 static int simd_opc[4] = { 0, 0, 0x38, 0x3A }; 5720 5721 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding. 5722 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 5723 if (pre > 0) { 5724 emit_int8(simd_pre[pre]); 5725 } 5726 if (rex_w) { 5727 prefixq(adr, xreg); 5728 } else { 5729 prefix(adr, xreg); 5730 } 5731 if (opc > 0) { 5732 emit_int8(0x0F); 5733 int opc2 = simd_opc[opc]; 5734 if (opc2 > 0) { 5735 emit_int8(opc2); 5736 } 5737 } 5738 } 5739 5740 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) { 5741 if (pre > 0) { 5742 emit_int8(simd_pre[pre]); 5743 } 5744 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : 5745 prefix_and_encode(dst_enc, src_enc); 5746 if (opc > 0) { 5747 emit_int8(0x0F); 5748 int opc2 = simd_opc[opc]; 5749 if (opc2 > 0) { 5750 emit_int8(opc2); 5751 } 5752 } 5753 return encode; 5754 } 5755 5756 5757 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, int vector_len) { 5758 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { 5759 prefix(VEX_3bytes); 5760 5761 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); 5762 byte1 = (~byte1) & 0xE0; 5763 byte1 |= opc; 5764 emit_int8(byte1); 5765 5766 int byte2 = ((~nds_enc) & 0xf) << 3; 5767 byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 
4 : 0) | pre; 5768 emit_int8(byte2); 5769 } else { 5770 prefix(VEX_2bytes); 5771
5772 int byte1 = vex_r ? VEX_R : 0; 5773 byte1 = (~byte1) & 0x80; 5774 byte1 |= ((~nds_enc) & 0xf) << 3; 5775 byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre; 5776 emit_int8(byte1); 5777 } 5778 } 5779
5780 // This is a 4 byte encoding 5781 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v, 5782 int nds_enc, VexSimdPrefix pre, VexOpcode opc, 5783 bool is_extended_context, bool is_merge_context, 5784 int vector_len, bool no_mask_reg ){ 5785 // EVEX 0x62 prefix 5786 prefix(EVEX_4bytes); 5787 evex_encoding = (vex_w ? VEX_W : 0) | (evex_r ? EVEX_Rb : 0); 5788
5789 // P0: byte 2, laid out as RXBR`00mm; the R, X, B and R` bits 5790 // are stored complemented (not'd) below 5791 int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0); 5792 byte2 = (~byte2) & 0xF0; 5793 // confine opc opcode extensions in mm bits to lower two bits 5794 // of form {0F, 0F_38, 0F_3A} 5795 byte2 |= opc; 5796 emit_int8(byte2); 5797
5798 // P1: byte 3 as Wvvvv1pp 5799 int byte3 = ((~nds_enc) & 0xf) << 3; 5800 // p[10] is always 1 5801 byte3 |= EVEX_F; 5802 byte3 |= (vex_w & 1) << 7; 5803 // confine pre opcode extensions in pp bits to lower two bits 5804 // of form {66, F3, F2} 5805 byte3 |= pre; 5806 emit_int8(byte3); 5807
5808 // P2: byte 4 as zL'Lbv'aaa 5809 int byte4 = (no_mask_reg) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard-coded to k1; it will be initialized for now) 5810 // EVEX.v` for extending EVEX.vvvv or VIDX 5811 byte4 |= (evex_v ? 0 : EVEX_V); 5812 // third is EVEX.b for broadcast actions 5813 byte4 |= (is_extended_context ? EVEX_Rb : 0); 5814 // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024 5815 byte4 |= ((vector_len) & 0x3) << 5; 5816 // last is EVEX.z for zero/merge actions 5817 byte4 |= (is_merge_context ? EVEX_Z : 0); 5818 emit_int8(byte4); 5819 } 5820
5821 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, 5822 VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) { 5823 bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0; 5824 bool vex_b = adr.base_needs_rex(); 5825 bool vex_x = adr.index_needs_rex(); 5826 avx_vector_len = vector_len; 5827
5828 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit 5829 if (VM_Version::supports_avx512vl() == false) { 5830 switch (vector_len) { 5831 case AVX_128bit: 5832 case AVX_256bit: 5833 legacy_mode = true; 5834 break; 5835 } 5836 } 5837
5838 if ((UseAVX > 2) && (legacy_mode == false)) 5839 { 5840 bool evex_r = (xreg_enc >= 16); 5841 bool evex_v = (nds_enc >= 16); 5842 is_evex_instruction = true; 5843 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg); 5844 } else { 5845 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len); 5846 } 5847 } 5848
5849 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, 5850 bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg ) { 5851 bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0; 5852 bool vex_b = ((src_enc & 8) == 8) ?
1 : 0; 5853 bool vex_x = false; 5854 avx_vector_len = vector_len; 5855 5856 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit 5857 if (VM_Version::supports_avx512vl() == false) { 5858 switch (vector_len) { 5859 case AVX_128bit: 5860 case AVX_256bit: 5861 legacy_mode = true; 5862 break; 5863 } 5864 } 5865 5866 if ((UseAVX > 2) && (legacy_mode == false)) 5867 { 5868 bool evex_r = (dst_enc >= 16); 5869 bool evex_v = (nds_enc >= 16); 5870 // can use vex_x as bank extender on rm encoding 5871 vex_x = (src_enc >= 16); 5872 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg); 5873 } else { 5874 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len); 5875 } 5876 5877 // return modrm byte components for operands 5878 return (((dst_enc & 7) << 3) | (src_enc & 7)); 5879 } 5880 5881 5882 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, 5883 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) { 5884 if (UseAVX > 0) { 5885 int xreg_enc = xreg->encoding(); 5886 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5887 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg); 5888 } else { 5889 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding"); 5890 rex_prefix(adr, xreg, pre, opc, rex_w); 5891 } 5892 } 5893 5894 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, 5895 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len, bool legacy_mode) { 5896 int dst_enc = dst->encoding(); 5897 int src_enc = src->encoding(); 5898 if (UseAVX > 0) { 5899 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5900 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, legacy_mode, no_mask_reg); 5901 } else { 5902 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding"); 5903 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w); 5904 } 5905 } 5906 5907 int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, VexSimdPrefix pre, 5908 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) { 5909 int dst_enc = dst->encoding(); 5910 int src_enc = src->encoding(); 5911 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 5912 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg); 5913 } 5914 5915 int Assembler::kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, VexSimdPrefix pre, 5916 bool no_mask_reg, VexOpcode opc, bool rex_w, int vector_len) { 5917 int dst_enc = dst->encoding(); 5918 int src_enc = src->encoding(); 5919 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 5920 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector_len, true, no_mask_reg); 5921 } 5922 5923 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5924 InstructionMark im(this); 5925 simd_prefix(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode); 5926 emit_int8(opcode); 5927 emit_operand(dst, src); 5928 } 5929 5930 void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg) { 5931 InstructionMark im(this); 5932 simd_prefix_q(dst, dst, src, pre, no_mask_reg); 5933 emit_int8(opcode); 5934 emit_operand(dst, src); 5935 } 5936 5937 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5938 int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, false, AVX_128bit, legacy_mode); 5939 emit_int8(opcode); 5940 emit_int8((unsigned char)(0xC0 | encode)); 5941 } 5942 5943 void Assembler::emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) { 5944 int encode = simd_prefix_and_encode(dst, dst, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit); 5945 emit_int8(opcode); 5946 emit_int8((unsigned char)(0xC0 | encode)); 5947 } 5948 5949 // Versions with no second source register (non-destructive source). 5950 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) { 5951 InstructionMark im(this); 5952 simd_prefix(dst, xnoreg, src, pre, opNoRegMask); 5953 emit_int8(opcode); 5954 emit_operand(dst, src); 5955 } 5956 5957 void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool opNoRegMask) { 5958 InstructionMark im(this); 5959 simd_prefix_q(dst, xnoreg, src, pre, opNoRegMask); 5960 emit_int8(opcode); 5961 emit_operand(dst, src); 5962 } 5963 5964 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg, bool legacy_mode) { 5965 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, legacy_mode, AVX_128bit); 5966 emit_int8(opcode); 5967 emit_int8((unsigned char)(0xC0 | encode)); 5968 } 5969 5970 void Assembler::emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) { 5971 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg, VEX_OPCODE_0F, true, AVX_128bit); 5972 emit_int8(opcode); 5973 emit_int8((unsigned char)(0xC0 | encode)); 5974 } 5975 5976 // 3-operands AVX instructions 5977 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, Address src, 5978 VexSimdPrefix pre, int vector_len, bool no_mask_reg, bool legacy_mode) { 5979 InstructionMark im(this); 5980 vex_prefix(dst, nds, src, pre, vector_len, no_mask_reg, legacy_mode); 5981 emit_int8(opcode); 5982 emit_operand(dst, src); 5983 } 5984 5985 void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, 5986 Address src, VexSimdPrefix pre, int vector_len, bool no_mask_reg) { 5987 InstructionMark im(this); 5988 vex_prefix_q(dst, nds, src, pre, vector_len, no_mask_reg); 5989 emit_int8(opcode); 5990 emit_operand(dst, src); 5991 } 5992 5993 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, 5994 VexSimdPrefix pre, int vector_len, bool 
no_mask_reg, bool legacy_mode) { 5995 int encode = vex_prefix_and_encode(dst, nds, src, pre, vector_len, VEX_OPCODE_0F, false, no_mask_reg); 5996 emit_int8(opcode); 5997 emit_int8((unsigned char)(0xC0 | encode)); 5998 } 5999
6000 void Assembler::emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, 6001 VexSimdPrefix pre, int vector_len, bool no_mask_reg) { 6002 int src_enc = src->encoding(); 6003 int dst_enc = dst->encoding(); 6004 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 6005 int encode = vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg); 6006 emit_int8(opcode); 6007 emit_int8((unsigned char)(0xC0 | encode)); 6008 } 6009
6010 #ifndef _LP64 6011
6012 void Assembler::incl(Register dst) { 6013 // Don't use it directly. Use MacroAssembler::incrementl() instead. 6014 emit_int8(0x40 | dst->encoding()); 6015 } 6016
6017 void Assembler::lea(Register dst, Address src) { 6018 leal(dst, src); 6019 } 6020
6021 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) { 6022 InstructionMark im(this); 6023 emit_int8((unsigned char)0xC7); 6024 emit_operand(rax, dst); 6025 emit_data((int)imm32, rspec, 0); 6026 } 6027
6028 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) { 6029 InstructionMark im(this); 6030 int encode = prefix_and_encode(dst->encoding()); 6031 emit_int8((unsigned char)(0xB8 | encode)); 6032 emit_data((int)imm32, rspec, 0); 6033 } 6034
6035 void Assembler::popa() { // 32bit 6036 emit_int8(0x61); 6037 } 6038
6039 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) { 6040 InstructionMark im(this); 6041 emit_int8(0x68); 6042 emit_data(imm32, rspec, 0); 6043 } 6044
6045 void Assembler::pusha() { // 32bit 6046 emit_int8(0x60); 6047 } 6048
6049 void Assembler::set_byte_if_not_zero(Register dst) { 6050 emit_int8(0x0F); 6051 emit_int8((unsigned char)0x95); 6052 emit_int8((unsigned char)(0xE0 | dst->encoding())); 6053 } 6054
6055 void Assembler::shldl(Register dst, Register src) { 6056 emit_int8(0x0F); 6057 emit_int8((unsigned char)0xA5); 6058 emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); 6059 } 6060
6061 void Assembler::shrdl(Register dst, Register src) { 6062 emit_int8(0x0F); 6063 emit_int8((unsigned char)0xAD); 6064 emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); 6065 } 6066
6067 #else // LP64 6068
6069 void Assembler::set_byte_if_not_zero(Register dst) { 6070 int enc = prefix_and_encode(dst->encoding(), true); 6071 emit_int8(0x0F); 6072 emit_int8((unsigned char)0x95); 6073 emit_int8((unsigned char)(0xE0 | enc)); 6074 } 6075
6076 // 64bit only pieces of the assembler 6077 // This should only be used by 64bit instructions that can use rip-relative addressing; 6078 // it cannot be used by instructions that want an immediate value. 6079
6080 bool Assembler::reachable(AddressLiteral adr) { 6081 int64_t disp; 6082 // relocInfo::none will force a 64bit literal to the code stream. Likely a placeholder 6083 // for something that will be patched later and we need to be certain it will 6084 // always be reachable. 6085 if (adr.reloc() == relocInfo::none) { 6086 return false; 6087 } 6088 if (adr.reloc() == relocInfo::internal_word_type) { 6089 // This should be rip relative and easily reachable. 6090 return true; 6091 } 6092 if (adr.reloc() == relocInfo::virtual_call_type || 6093 adr.reloc() == relocInfo::opt_virtual_call_type || 6094 adr.reloc() == relocInfo::static_call_type || 6095 adr.reloc() == relocInfo::static_stub_type ) { 6096 // This should be rip relative within the code cache and easily 6097 // reachable until we get huge code caches. (At which point 6098 // ic code is going to have issues). 6099 return true; 6100 } 6101 if (adr.reloc() != relocInfo::external_word_type && 6102 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special 6103 adr.reloc() != relocInfo::poll_type && // relocs to identify them 6104 adr.reloc() != relocInfo::runtime_call_type ) { 6105 return false; 6106 } 6107
6108 // Stress the correction code 6109 if (ForceUnreachable) { 6110 // Must be a runtime call reloc; see if it is in the codecache. 6111 // Flipping stuff in the codecache to be unreachable causes issues 6112 // with things like inline caches where the additional instructions 6113 // are not handled. 6114 if (CodeCache::find_blob(adr._target) == NULL) { 6115 return false; 6116 } 6117 } 6118 // For external_word_type/runtime_call_type, if the target is reachable both from where we 6119 // are now (possibly a temp buffer) and from anywhere we might end up 6120 // in the codeCache, then we are always reachable. 6121 // This would have to change (become more pessimistic) if we ever 6122 // save/restore shared code. 6123 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); 6124 if (!is_simm32(disp)) return false; 6125 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); 6126 if (!is_simm32(disp)) return false; 6127
6128 disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int)); 6129
6130 // Because rip relative is a disp + address_of_next_instruction and we 6131 // don't know the value of address_of_next_instruction we apply a fudge factor 6132 // to make sure we will be ok no matter the size of the instruction we get placed into. 6133 // We don't have to fudge the checks above here because they are already worst case. 6134
6135 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal 6136 // + 4 because better safe than sorry. 6137 const int fudge = 12 + 4; 6138 if (disp < 0) { 6139 disp -= fudge; 6140 } else { 6141 disp += fudge; 6142 } 6143 return is_simm32(disp); 6144 } 6145
6146 // Check if the polling page is not reachable from the code cache using rip-relative 6147 // addressing. 6148 bool Assembler::is_polling_page_far() { 6149 intptr_t addr = (intptr_t)os::get_polling_page(); 6150 return ForceUnreachable || 6151 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) || 6152 !is_simm32(addr - (intptr_t)CodeCache::high_bound()); 6153 } 6154
6155 void Assembler::emit_data64(jlong data, 6156 relocInfo::relocType rtype, 6157 int format) { 6158 if (rtype == relocInfo::none) { 6159 emit_int64(data); 6160 } else { 6161 emit_data64(data, Relocation::spec_simple(rtype), format); 6162 } 6163 } 6164
6165 void Assembler::emit_data64(jlong data, 6166 RelocationHolder const& rspec, 6167 int format) { 6168 assert(imm_operand == 0, "default format must be immediate in this file"); 6169 assert(imm_operand == format, "must be immediate"); 6170 assert(inst_mark() != NULL, "must be inside InstructionMark"); 6171 // Do not use AbstractAssembler::relocate, which is not intended for 6172 // embedded words. Instead, relocate to the enclosing instruction.
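// Callers such as mov_literal64() below open an InstructionMark around
// the whole instruction, so inst_mark() points at its first byte and the
// relocation iterator can locate the embedded 64-bit immediate via the
// imm_operand format.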
6173 code_section()->relocate(inst_mark(), rspec, format); 6174 #ifdef ASSERT 6175 check_relocation(rspec, format); 6176 #endif 6177 emit_int64(data); 6178 } 6179 6180 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { 6181 if (reg_enc >= 8) { 6182 prefix(REX_B); 6183 reg_enc -= 8; 6184 } else if (byteinst && reg_enc >= 4) { 6185 prefix(REX); 6186 } 6187 return reg_enc; 6188 } 6189 6190 int Assembler::prefixq_and_encode(int reg_enc) { 6191 if (reg_enc < 8) { 6192 prefix(REX_W); 6193 } else { 6194 prefix(REX_WB); 6195 reg_enc -= 8; 6196 } 6197 return reg_enc; 6198 } 6199 6200 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { 6201 if (dst_enc < 8) { 6202 if (src_enc >= 8) { 6203 prefix(REX_B); 6204 src_enc -= 8; 6205 } else if (byteinst && src_enc >= 4) { 6206 prefix(REX); 6207 } 6208 } else { 6209 if (src_enc < 8) { 6210 prefix(REX_R); 6211 } else { 6212 prefix(REX_RB); 6213 src_enc -= 8; 6214 } 6215 dst_enc -= 8; 6216 } 6217 return dst_enc << 3 | src_enc; 6218 } 6219 6220 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { 6221 if (dst_enc < 8) { 6222 if (src_enc < 8) { 6223 prefix(REX_W); 6224 } else { 6225 prefix(REX_WB); 6226 src_enc -= 8; 6227 } 6228 } else { 6229 if (src_enc < 8) { 6230 prefix(REX_WR); 6231 } else { 6232 prefix(REX_WRB); 6233 src_enc -= 8; 6234 } 6235 dst_enc -= 8; 6236 } 6237 return dst_enc << 3 | src_enc; 6238 } 6239 6240 void Assembler::prefix(Register reg) { 6241 if (reg->encoding() >= 8) { 6242 prefix(REX_B); 6243 } 6244 } 6245 6246 void Assembler::prefix(Address adr) { 6247 if (adr.base_needs_rex()) { 6248 if (adr.index_needs_rex()) { 6249 prefix(REX_XB); 6250 } else { 6251 prefix(REX_B); 6252 } 6253 } else { 6254 if (adr.index_needs_rex()) { 6255 prefix(REX_X); 6256 } 6257 } 6258 } 6259 6260 void Assembler::prefixq(Address adr) { 6261 if (adr.base_needs_rex()) { 6262 if (adr.index_needs_rex()) { 6263 prefix(REX_WXB); 6264 } else { 6265 prefix(REX_WB); 6266 } 6267 } else { 6268 if (adr.index_needs_rex()) { 6269 prefix(REX_WX); 6270 } else { 6271 prefix(REX_W); 6272 } 6273 } 6274 } 6275 6276 6277 void Assembler::prefix(Address adr, Register reg, bool byteinst) { 6278 if (reg->encoding() < 8) { 6279 if (adr.base_needs_rex()) { 6280 if (adr.index_needs_rex()) { 6281 prefix(REX_XB); 6282 } else { 6283 prefix(REX_B); 6284 } 6285 } else { 6286 if (adr.index_needs_rex()) { 6287 prefix(REX_X); 6288 } else if (byteinst && reg->encoding() >= 4 ) { 6289 prefix(REX); 6290 } 6291 } 6292 } else { 6293 if (adr.base_needs_rex()) { 6294 if (adr.index_needs_rex()) { 6295 prefix(REX_RXB); 6296 } else { 6297 prefix(REX_RB); 6298 } 6299 } else { 6300 if (adr.index_needs_rex()) { 6301 prefix(REX_RX); 6302 } else { 6303 prefix(REX_R); 6304 } 6305 } 6306 } 6307 } 6308 6309 void Assembler::prefixq(Address adr, Register src) { 6310 if (src->encoding() < 8) { 6311 if (adr.base_needs_rex()) { 6312 if (adr.index_needs_rex()) { 6313 prefix(REX_WXB); 6314 } else { 6315 prefix(REX_WB); 6316 } 6317 } else { 6318 if (adr.index_needs_rex()) { 6319 prefix(REX_WX); 6320 } else { 6321 prefix(REX_W); 6322 } 6323 } 6324 } else { 6325 if (adr.base_needs_rex()) { 6326 if (adr.index_needs_rex()) { 6327 prefix(REX_WRXB); 6328 } else { 6329 prefix(REX_WRB); 6330 } 6331 } else { 6332 if (adr.index_needs_rex()) { 6333 prefix(REX_WRX); 6334 } else { 6335 prefix(REX_WR); 6336 } 6337 } 6338 } 6339 } 6340 6341 void Assembler::prefix(Address adr, XMMRegister reg) { 6342 if (reg->encoding() < 8) { 6343 if (adr.base_needs_rex()) { 6344 if (adr.index_needs_rex()) { 
6345 prefix(REX_XB); 6346 } else { 6347 prefix(REX_B); 6348 } 6349 } else { 6350 if (adr.index_needs_rex()) { 6351 prefix(REX_X); 6352 } 6353 } 6354 } else { 6355 if (adr.base_needs_rex()) { 6356 if (adr.index_needs_rex()) { 6357 prefix(REX_RXB); 6358 } else { 6359 prefix(REX_RB); 6360 } 6361 } else { 6362 if (adr.index_needs_rex()) { 6363 prefix(REX_RX); 6364 } else { 6365 prefix(REX_R); 6366 } 6367 } 6368 } 6369 } 6370 6371 void Assembler::prefixq(Address adr, XMMRegister src) { 6372 if (src->encoding() < 8) { 6373 if (adr.base_needs_rex()) { 6374 if (adr.index_needs_rex()) { 6375 prefix(REX_WXB); 6376 } else { 6377 prefix(REX_WB); 6378 } 6379 } else { 6380 if (adr.index_needs_rex()) { 6381 prefix(REX_WX); 6382 } else { 6383 prefix(REX_W); 6384 } 6385 } 6386 } else { 6387 if (adr.base_needs_rex()) { 6388 if (adr.index_needs_rex()) { 6389 prefix(REX_WRXB); 6390 } else { 6391 prefix(REX_WRB); 6392 } 6393 } else { 6394 if (adr.index_needs_rex()) { 6395 prefix(REX_WRX); 6396 } else { 6397 prefix(REX_WR); 6398 } 6399 } 6400 } 6401 } 6402 6403 void Assembler::adcq(Register dst, int32_t imm32) { 6404 (void) prefixq_and_encode(dst->encoding()); 6405 emit_arith(0x81, 0xD0, dst, imm32); 6406 } 6407 6408 void Assembler::adcq(Register dst, Address src) { 6409 InstructionMark im(this); 6410 prefixq(src, dst); 6411 emit_int8(0x13); 6412 emit_operand(dst, src); 6413 } 6414 6415 void Assembler::adcq(Register dst, Register src) { 6416 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6417 emit_arith(0x13, 0xC0, dst, src); 6418 } 6419 6420 void Assembler::addq(Address dst, int32_t imm32) { 6421 InstructionMark im(this); 6422 prefixq(dst); 6423 emit_arith_operand(0x81, rax, dst,imm32); 6424 } 6425 6426 void Assembler::addq(Address dst, Register src) { 6427 InstructionMark im(this); 6428 prefixq(dst, src); 6429 emit_int8(0x01); 6430 emit_operand(src, dst); 6431 } 6432 6433 void Assembler::addq(Register dst, int32_t imm32) { 6434 (void) prefixq_and_encode(dst->encoding()); 6435 emit_arith(0x81, 0xC0, dst, imm32); 6436 } 6437 6438 void Assembler::addq(Register dst, Address src) { 6439 InstructionMark im(this); 6440 prefixq(src, dst); 6441 emit_int8(0x03); 6442 emit_operand(dst, src); 6443 } 6444 6445 void Assembler::addq(Register dst, Register src) { 6446 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6447 emit_arith(0x03, 0xC0, dst, src); 6448 } 6449 6450 void Assembler::adcxq(Register dst, Register src) { 6451 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 6452 emit_int8((unsigned char)0x66); 6453 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6454 emit_int8(0x0F); 6455 emit_int8(0x38); 6456 emit_int8((unsigned char)0xF6); 6457 emit_int8((unsigned char)(0xC0 | encode)); 6458 } 6459 6460 void Assembler::adoxq(Register dst, Register src) { 6461 //assert(VM_Version::supports_adx(), "adx instructions not supported"); 6462 emit_int8((unsigned char)0xF3); 6463 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6464 emit_int8(0x0F); 6465 emit_int8(0x38); 6466 emit_int8((unsigned char)0xF6); 6467 emit_int8((unsigned char)(0xC0 | encode)); 6468 } 6469 6470 void Assembler::andq(Address dst, int32_t imm32) { 6471 InstructionMark im(this); 6472 prefixq(dst); 6473 emit_int8((unsigned char)0x81); 6474 emit_operand(rsp, dst, 4); 6475 emit_int32(imm32); 6476 } 6477 6478 void Assembler::andq(Register dst, int32_t imm32) { 6479 (void) prefixq_and_encode(dst->encoding()); 6480 emit_arith(0x81, 0xE0, dst, imm32); 6481 } 6482 6483 void 
Assembler::andq(Register dst, Address src) { 6484 InstructionMark im(this); 6485 prefixq(src, dst); 6486 emit_int8(0x23); 6487 emit_operand(dst, src); 6488 } 6489 6490 void Assembler::andq(Register dst, Register src) { 6491 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6492 emit_arith(0x23, 0xC0, dst, src); 6493 } 6494 6495 void Assembler::andnq(Register dst, Register src1, Register src2) { 6496 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6497 int encode = vex_prefix_0F38_and_encode_q_legacy(dst, src1, src2); 6498 emit_int8((unsigned char)0xF2); 6499 emit_int8((unsigned char)(0xC0 | encode)); 6500 } 6501 6502 void Assembler::andnq(Register dst, Register src1, Address src2) { 6503 InstructionMark im(this); 6504 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6505 vex_prefix_0F38_q_legacy(dst, src1, src2); 6506 emit_int8((unsigned char)0xF2); 6507 emit_operand(dst, src2); 6508 } 6509 6510 void Assembler::bsfq(Register dst, Register src) { 6511 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6512 emit_int8(0x0F); 6513 emit_int8((unsigned char)0xBC); 6514 emit_int8((unsigned char)(0xC0 | encode)); 6515 } 6516 6517 void Assembler::bsrq(Register dst, Register src) { 6518 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6519 emit_int8(0x0F); 6520 emit_int8((unsigned char)0xBD); 6521 emit_int8((unsigned char)(0xC0 | encode)); 6522 } 6523 6524 void Assembler::bswapq(Register reg) { 6525 int encode = prefixq_and_encode(reg->encoding()); 6526 emit_int8(0x0F); 6527 emit_int8((unsigned char)(0xC8 | encode)); 6528 } 6529 6530 void Assembler::blsiq(Register dst, Register src) { 6531 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6532 int encode = vex_prefix_0F38_and_encode_q_legacy(rbx, dst, src); 6533 emit_int8((unsigned char)0xF3); 6534 emit_int8((unsigned char)(0xC0 | encode)); 6535 } 6536 6537 void Assembler::blsiq(Register dst, Address src) { 6538 InstructionMark im(this); 6539 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6540 vex_prefix_0F38_q_legacy(rbx, dst, src); 6541 emit_int8((unsigned char)0xF3); 6542 emit_operand(rbx, src); 6543 } 6544 6545 void Assembler::blsmskq(Register dst, Register src) { 6546 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6547 int encode = vex_prefix_0F38_and_encode_q_legacy(rdx, dst, src); 6548 emit_int8((unsigned char)0xF3); 6549 emit_int8((unsigned char)(0xC0 | encode)); 6550 } 6551 6552 void Assembler::blsmskq(Register dst, Address src) { 6553 InstructionMark im(this); 6554 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6555 vex_prefix_0F38_q_legacy(rdx, dst, src); 6556 emit_int8((unsigned char)0xF3); 6557 emit_operand(rdx, src); 6558 } 6559 6560 void Assembler::blsrq(Register dst, Register src) { 6561 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6562 int encode = vex_prefix_0F38_and_encode_q_legacy(rcx, dst, src); 6563 emit_int8((unsigned char)0xF3); 6564 emit_int8((unsigned char)(0xC0 | encode)); 6565 } 6566 6567 void Assembler::blsrq(Register dst, Address src) { 6568 InstructionMark im(this); 6569 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); 6570 vex_prefix_0F38_q_legacy(rcx, dst, src); 6571 emit_int8((unsigned char)0xF3); 6572 emit_operand(rcx, src); 6573 } 6574 6575 void Assembler::cdqq() { 6576 prefix(REX_W); 
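// REX.W + 0x99 is CQO (bytes 48 99): sign-extend RAX into RDX:RAX,
// typically to set up the 128-bit dividend for idivq.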
6577 emit_int8((unsigned char)0x99); 6578 } 6579
6580 void Assembler::clflush(Address adr) { 6581 prefix(adr); 6582 emit_int8(0x0F); 6583 emit_int8((unsigned char)0xAE); 6584 emit_operand(rdi, adr); 6585 } 6586
6587 void Assembler::cmovq(Condition cc, Register dst, Register src) { 6588 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6589 emit_int8(0x0F); 6590 emit_int8(0x40 | cc); 6591 emit_int8((unsigned char)(0xC0 | encode)); 6592 } 6593
6594 void Assembler::cmovq(Condition cc, Register dst, Address src) { 6595 InstructionMark im(this); 6596 prefixq(src, dst); 6597 emit_int8(0x0F); 6598 emit_int8(0x40 | cc); 6599 emit_operand(dst, src); 6600 } 6601
6602 void Assembler::cmpq(Address dst, int32_t imm32) { 6603 InstructionMark im(this); 6604 prefixq(dst); 6605 emit_int8((unsigned char)0x81); 6606 emit_operand(rdi, dst, 4); 6607 emit_int32(imm32); 6608 } 6609
6610 void Assembler::cmpq(Register dst, int32_t imm32) { 6611 (void) prefixq_and_encode(dst->encoding()); 6612 emit_arith(0x81, 0xF8, dst, imm32); 6613 } 6614
6615 void Assembler::cmpq(Address dst, Register src) { 6616 InstructionMark im(this); 6617 prefixq(dst, src); 6618 emit_int8(0x39); // CMP r/m64, r64: flags reflect dst - src 6619 emit_operand(src, dst); 6620 } 6621
6622 void Assembler::cmpq(Register dst, Register src) { 6623 (void) prefixq_and_encode(dst->encoding(), src->encoding()); 6624 emit_arith(0x3B, 0xC0, dst, src); 6625 } 6626
6627 void Assembler::cmpq(Register dst, Address src) { 6628 InstructionMark im(this); 6629 prefixq(src, dst); 6630 emit_int8(0x3B); 6631 emit_operand(dst, src); 6632 } 6633
6634 void Assembler::cmpxchgq(Register reg, Address adr) { 6635 InstructionMark im(this); 6636 prefixq(adr, reg); 6637 emit_int8(0x0F); 6638 emit_int8((unsigned char)0xB1); 6639 emit_operand(reg, adr); 6640 } 6641
6642 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { 6643 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6644 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2, true); 6645 emit_int8(0x2A); 6646 emit_int8((unsigned char)(0xC0 | encode)); 6647 } 6648
6649 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { 6650 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6651 if (VM_Version::supports_evex()) { 6652 tuple_type = EVEX_T1S; 6653 input_size_in_bits = EVEX_64bit; // 'q' variant reads a 64-bit integer 6654 } 6655 InstructionMark im(this); 6656 simd_prefix_q(dst, dst, src, VEX_SIMD_F2, true); 6657 emit_int8(0x2A); 6658 emit_operand(dst, src); 6659 } 6660
6661 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { 6662 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6663 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3, true); 6664 emit_int8(0x2A); 6665 emit_int8((unsigned char)(0xC0 | encode)); 6666 } 6667
6668 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) { 6669 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6670 if (VM_Version::supports_evex()) { 6671 tuple_type = EVEX_T1S; 6672 input_size_in_bits = EVEX_64bit; // 'q' variant reads a 64-bit integer 6673 } 6674 InstructionMark im(this); 6675 simd_prefix_q(dst, dst, src, VEX_SIMD_F3, true); 6676 emit_int8(0x2A); 6677 emit_operand(dst, src); 6678 } 6679
6680 void Assembler::cvttsd2siq(Register dst, XMMRegister src) { 6681 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6682 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, true); 6683 emit_int8(0x2C); 6684 emit_int8((unsigned char)(0xC0 | encode)); 6685 } 6686
6687 void Assembler::cvttss2siq(Register dst, XMMRegister src) { 6688 NOT_LP64(assert(VM_Version::supports_sse(), "")); 6689 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, true); 6690 emit_int8(0x2C); 6691 emit_int8((unsigned char)(0xC0 | encode)); 6692 } 6693
6694 void Assembler::decl(Register dst) { 6695 // Don't use it directly. Use MacroAssembler::decrementl() instead. 6696 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 6697 int encode = prefix_and_encode(dst->encoding()); 6698 emit_int8((unsigned char)0xFF); 6699 emit_int8((unsigned char)(0xC8 | encode)); 6700 } 6701
6702 void Assembler::decq(Register dst) { 6703 // Don't use it directly. Use MacroAssembler::decrementq() instead. 6704 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 6705 int encode = prefixq_and_encode(dst->encoding()); 6706 emit_int8((unsigned char)0xFF); 6707 emit_int8((unsigned char)(0xC8 | encode)); 6708 } 6709
6710 void Assembler::decq(Address dst) { 6711 // Don't use it directly. Use MacroAssembler::decrementq() instead. 6712 InstructionMark im(this); 6713 prefixq(dst); 6714 emit_int8((unsigned char)0xFF); 6715 emit_operand(rcx, dst); 6716 } 6717
6718 void Assembler::fxrstor(Address src) { 6719 prefixq(src); 6720 emit_int8(0x0F); 6721 emit_int8((unsigned char)0xAE); 6722 emit_operand(as_Register(1), src); 6723 } 6724
6725 void Assembler::fxsave(Address dst) { 6726 prefixq(dst); 6727 emit_int8(0x0F); 6728 emit_int8((unsigned char)0xAE); 6729 emit_operand(as_Register(0), dst); 6730 } 6731
6732 void Assembler::idivq(Register src) { 6733 int encode = prefixq_and_encode(src->encoding()); 6734 emit_int8((unsigned char)0xF7); 6735 emit_int8((unsigned char)(0xF8 | encode)); 6736 } 6737
6738 void Assembler::imulq(Register dst, Register src) { 6739 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6740 emit_int8(0x0F); 6741 emit_int8((unsigned char)0xAF); 6742 emit_int8((unsigned char)(0xC0 | encode)); 6743 } 6744
6745 void Assembler::imulq(Register dst, Register src, int value) { 6746 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6747 if (is8bit(value)) { 6748 emit_int8(0x6B); 6749 emit_int8((unsigned char)(0xC0 | encode)); 6750 emit_int8(value & 0xFF); 6751 } else { 6752 emit_int8(0x69); 6753 emit_int8((unsigned char)(0xC0 | encode)); 6754 emit_int32(value); 6755 } 6756 } 6757
6758 void Assembler::imulq(Register dst, Address src) { 6759 InstructionMark im(this); 6760 prefixq(src, dst); 6761 emit_int8(0x0F); 6762 emit_int8((unsigned char) 0xAF); 6763 emit_operand(dst, src); 6764 } 6765
6766 void Assembler::incl(Register dst) { 6767 // Don't use it directly. Use MacroAssembler::incrementl() instead. 6768 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 6769 int encode = prefix_and_encode(dst->encoding()); 6770 emit_int8((unsigned char)0xFF); 6771 emit_int8((unsigned char)(0xC0 | encode)); 6772 } 6773
6774 void Assembler::incq(Register dst) { 6775 // Don't use it directly. Use MacroAssembler::incrementq() instead. 6776 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) 6777 int encode = prefixq_and_encode(dst->encoding()); 6778 emit_int8((unsigned char)0xFF); 6779 emit_int8((unsigned char)(0xC0 | encode)); 6780 } 6781
6782 void Assembler::incq(Address dst) { 6783 // Don't use it directly. Use MacroAssembler::incrementq() instead.
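// MacroAssembler::incrementq() may also replace INC with an ADD of 1
// (e.g. when the UseIncDec flag is off), since INC/DEC leave CF untouched
// and only partially update EFLAGS.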
6784 InstructionMark im(this); 6785 prefixq(dst); 6786 emit_int8((unsigned char)0xFF); 6787 emit_operand(rax, dst); 6788 } 6789 6790 void Assembler::lea(Register dst, Address src) { 6791 leaq(dst, src); 6792 } 6793 6794 void Assembler::leaq(Register dst, Address src) { 6795 InstructionMark im(this); 6796 prefixq(src, dst); 6797 emit_int8((unsigned char)0x8D); 6798 emit_operand(dst, src); 6799 } 6800 6801 void Assembler::mov64(Register dst, int64_t imm64) { 6802 InstructionMark im(this); 6803 int encode = prefixq_and_encode(dst->encoding()); 6804 emit_int8((unsigned char)(0xB8 | encode)); 6805 emit_int64(imm64); 6806 } 6807 6808 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { 6809 InstructionMark im(this); 6810 int encode = prefixq_and_encode(dst->encoding()); 6811 emit_int8(0xB8 | encode); 6812 emit_data64(imm64, rspec); 6813 } 6814 6815 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) { 6816 InstructionMark im(this); 6817 int encode = prefix_and_encode(dst->encoding()); 6818 emit_int8((unsigned char)(0xB8 | encode)); 6819 emit_data((int)imm32, rspec, narrow_oop_operand); 6820 } 6821 6822 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) { 6823 InstructionMark im(this); 6824 prefix(dst); 6825 emit_int8((unsigned char)0xC7); 6826 emit_operand(rax, dst, 4); 6827 emit_data((int)imm32, rspec, narrow_oop_operand); 6828 } 6829 6830 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { 6831 InstructionMark im(this); 6832 int encode = prefix_and_encode(src1->encoding()); 6833 emit_int8((unsigned char)0x81); 6834 emit_int8((unsigned char)(0xF8 | encode)); 6835 emit_data((int)imm32, rspec, narrow_oop_operand); 6836 } 6837 6838 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { 6839 InstructionMark im(this); 6840 prefix(src1); 6841 emit_int8((unsigned char)0x81); 6842 emit_operand(rax, src1, 4); 6843 emit_data((int)imm32, rspec, narrow_oop_operand); 6844 } 6845 6846 void Assembler::lzcntq(Register dst, Register src) { 6847 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); 6848 emit_int8((unsigned char)0xF3); 6849 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6850 emit_int8(0x0F); 6851 emit_int8((unsigned char)0xBD); 6852 emit_int8((unsigned char)(0xC0 | encode)); 6853 } 6854 6855 void Assembler::movdq(XMMRegister dst, Register src) { 6856 // table D-1 says MMX/SSE2 6857 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6858 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66, true); 6859 emit_int8(0x6E); 6860 emit_int8((unsigned char)(0xC0 | encode)); 6861 } 6862 6863 void Assembler::movdq(Register dst, XMMRegister src) { 6864 // table D-1 says MMX/SSE2 6865 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 6866 // swap src/dst to get correct prefix 6867 int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66, true); 6868 emit_int8(0x7E); 6869 emit_int8((unsigned char)(0xC0 | encode)); 6870 } 6871 6872 void Assembler::movq(Register dst, Register src) { 6873 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6874 emit_int8((unsigned char)0x8B); 6875 emit_int8((unsigned char)(0xC0 | encode)); 6876 } 6877 6878 void Assembler::movq(Register dst, Address src) { 6879 InstructionMark im(this); 6880 prefixq(src, dst); 6881 emit_int8((unsigned char)0x8B); 6882 emit_operand(dst, src); 6883 } 6884 6885 void Assembler::movq(Address dst, 
Register src) { 6886 InstructionMark im(this); 6887 prefixq(dst, src); 6888 emit_int8((unsigned char)0x89); 6889 emit_operand(src, dst); 6890 } 6891
6892 void Assembler::movsbq(Register dst, Address src) { 6893 InstructionMark im(this); 6894 prefixq(src, dst); 6895 emit_int8(0x0F); 6896 emit_int8((unsigned char)0xBE); 6897 emit_operand(dst, src); 6898 } 6899
6900 void Assembler::movsbq(Register dst, Register src) { 6901 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6902 emit_int8(0x0F); 6903 emit_int8((unsigned char)0xBE); 6904 emit_int8((unsigned char)(0xC0 | encode)); 6905 } 6906
6907 void Assembler::movslq(Register dst, int32_t imm32) { 6908 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) 6909 // and movslq(r8, 3) as movl $0x0000000048000000,(%rbx) 6910 // as a result we shouldn't use it until it has been tested at runtime... 6911 ShouldNotReachHere(); 6912 InstructionMark im(this); 6913 int encode = prefixq_and_encode(dst->encoding()); 6914 emit_int8((unsigned char)(0xC7 | encode)); 6915 emit_int32(imm32); 6916 } 6917
6918 void Assembler::movslq(Address dst, int32_t imm32) { 6919 assert(is_simm32(imm32), "lost bits"); 6920 InstructionMark im(this); 6921 prefixq(dst); 6922 emit_int8((unsigned char)0xC7); 6923 emit_operand(rax, dst, 4); 6924 emit_int32(imm32); 6925 } 6926
6927 void Assembler::movslq(Register dst, Address src) { 6928 InstructionMark im(this); 6929 prefixq(src, dst); 6930 emit_int8(0x63); 6931 emit_operand(dst, src); 6932 } 6933
6934 void Assembler::movslq(Register dst, Register src) { 6935 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6936 emit_int8(0x63); 6937 emit_int8((unsigned char)(0xC0 | encode)); 6938 } 6939
6940 void Assembler::movswq(Register dst, Address src) { 6941 InstructionMark im(this); 6942 prefixq(src, dst); 6943 emit_int8(0x0F); 6944 emit_int8((unsigned char)0xBF); 6945 emit_operand(dst, src); 6946 } 6947
6948 void Assembler::movswq(Register dst, Register src) { 6949 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6950 emit_int8((unsigned char)0x0F); 6951 emit_int8((unsigned char)0xBF); 6952 emit_int8((unsigned char)(0xC0 | encode)); 6953 } 6954
6955 void Assembler::movzbq(Register dst, Address src) { 6956 InstructionMark im(this); 6957 prefixq(src, dst); 6958 emit_int8((unsigned char)0x0F); 6959 emit_int8((unsigned char)0xB6); 6960 emit_operand(dst, src); 6961 } 6962
6963 void Assembler::movzbq(Register dst, Register src) { 6964 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6965 emit_int8(0x0F); 6966 emit_int8((unsigned char)0xB6); 6967 emit_int8(0xC0 | encode); 6968 } 6969
6970 void Assembler::movzwq(Register dst, Address src) { 6971 InstructionMark im(this); 6972 prefixq(src, dst); 6973 emit_int8((unsigned char)0x0F); 6974 emit_int8((unsigned char)0xB7); 6975 emit_operand(dst, src); 6976 } 6977
6978 void Assembler::movzwq(Register dst, Register src) { 6979 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 6980 emit_int8((unsigned char)0x0F); 6981 emit_int8((unsigned char)0xB7); 6982 emit_int8((unsigned char)(0xC0 | encode)); 6983 } 6984
6985 void Assembler::mulq(Address src) { 6986 InstructionMark im(this); 6987 prefixq(src); 6988 emit_int8((unsigned char)0xF7); 6989 emit_operand(rsp, src); 6990 } 6991
6992 void Assembler::mulq(Register src) { 6993 int encode = prefixq_and_encode(src->encoding()); 6994 emit_int8((unsigned char)0xF7); 6995 emit_int8((unsigned char)(0xE0 | encode)); 6996 } 6997
6998 void Assembler::mulxq(Register dst1, Register dst2,
void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(),
                                     VEX_SIMD_F2, VEX_OPCODE_0F_38, true, AVX_128bit, true, false);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
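// pusha()/popa() save and restore all sixteen general-purpose registers in a
// 16-slot frame below rsp. Layout after the subq, with slot N at
// Address(rsp, N * wordSize): slot 15 = rax, 14 = rcx, 13 = rdx, 12 = rbx,
// 11 = the original rsp (written into the red zone at -5 * wordSize before
// the subq, which leaves it at slot 11 afterwards), 10 = rbp, 9 = rsi,
// 8 = rdi, and slots 7..0 = r8..r15.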
void Assembler::pusha() { // 64bit
  // We have to store the original rsp. The ABI says that the 128 bytes
  // below rsp (the "red zone") are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xC8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2,
                                     VEX_OPCODE_0F_3A, true, AVX_128bit, true, false);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}
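// Encoding pattern for the rotate/shift-by-immediate group above and below:
// opcode 0xD1 /digit is the one-byte-shorter "by 1" form, 0xC1 /digit ib is
// the general immediate form, and 0xD3 /digit shifts by CL. Because 64-bit
// shift counts may be 0..63 while isShiftCount() checks the 32-bit range,
// the asserts test imm8 >> 1. Note that shrq(Register, int) below does not
// special-case imm8 == 1 and always uses the 0xC1 form.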
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4-byte immediate value even if it fits into 8 bits
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test doesn't support
  // sign-extension of 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}
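// Illustrative example for testq (bytes worked out by hand, not taken from
// this file): testq(rax, 0x100) takes the short rax-only form
// 48 A9 00 01 00 00 (REX.W, opcode A9, imm32), saving the ModRM byte that
// the general 48 F7 /0 form would need.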
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64
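// A minimal usage sketch of the 64-bit emitters above (hedged: it assumes
// the caller has set up a valid CodeBuffer, e.g. backed by a BufferBlob as
// the stub generators do; the names below are illustrative only, not a
// prescribed API flow):
//
//   Assembler masm(&code_buffer);
//   masm.movq(rax, rdi);   // emits 48 8B C7
//   masm.shlq(rax, 3);     // emits 48 C1 E0 03
//   masm.xorq(rax, rsi);   // emits 48 33 C6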