/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP

#include "asm/register.hpp"
#include "vm_version_x86.hpp"

class BiasedLockingCounters;

// Contains all the definitions needed for x86 assembly code generation.

// Calling convention
class Argument {
 public:
  enum {
#ifdef _LP64
#ifdef _WIN64
    n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
#else
    n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
#endif // _WIN64
    n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
#else
    n_register_parameters = 0   // 0 registers used to pass arguments
#endif // _LP64
  };
};


#ifdef _LP64
// Symbolically name the register arguments used by the c calling convention.
// Windows is different from linux/solaris. So much for standards...

#ifdef _WIN64

REGISTER_DECLARATION(Register, c_rarg0, rcx);
REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);

#else

REGISTER_DECLARATION(Register, c_rarg0, rdi);
REGISTER_DECLARATION(Register, c_rarg1, rsi);
REGISTER_DECLARATION(Register, c_rarg2, rdx);
REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);

#endif // _WIN64

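// Illustrative example (not part of the original header): code that sets up a
// native call such as memcpy(dst, src, len) can be written once against the
// symbolic c_rarg names, e.g.
//
//   mov(c_rarg0, dst_reg);   // rdi on linux/solaris, rcx on windows
//   mov(c_rarg1, src_reg);   // rsi on linux/solaris, rdx on windows
//   mov(c_rarg2, len_reg);   // rdx on linux/solaris, r8  on windows
//
// (dst_reg/src_reg/len_reg are hypothetical registers holding the arguments;
// the point is only that the per-platform assignment is hidden.)
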
// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do any shuffling. A suitable jni method is non-static with a
// small number of arguments (two fewer args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0  c_rarg1  c_rarg2  c_rarg3  c_rarg4  c_rarg5  |
// |-------------------------------------------------------|
// | rcx      rdx      r8       r9       rdi*     rsi*     | windows (* not a c_rarg)
// | rdi      rsi      rdx      rcx      r8       r9       | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5  j_rarg0  j_rarg1  j_rarg2  j_rarg3  j_rarg4  |
// |-------------------------------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);

REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);

REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile

REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved

#else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that if the dead code is incorrectly live and executed it
// will cause an assertion failure.
#define rscratch1 noreg
#define rscratch2 noreg

#endif // _LP64

// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);

// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
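//
// Illustrative examples (not part of the original header), using the
// constructors declared below:
//
//   Address(rbx, 8)                          // [rbx + 8]
//   Address(rbx, rcx, Address::times_8)      // [rbx + rcx*8]
//   Address(rsi, rdx, Address::times_4, 16)  // [rsi + rdx*4 + 16]
//
// i.e. one Address object captures base, index, scale and displacement.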

class ArrayAddress;

class Address {
 public:
  enum ScaleFactor {
    no_scale = -1,
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3,
    times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
  };
  static ScaleFactor times(int size) {
    assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
    if (size == 8)  return times_8;
    if (size == 4)  return times_4;
    if (size == 2)  return times_2;
    return times_1;
  }
  static int scale_size(ScaleFactor scale) {
    assert(scale != no_scale, "");
    assert(((1 << (int)times_1) == 1 &&
            (1 << (int)times_2) == 2 &&
            (1 << (int)times_4) == 4 &&
            (1 << (int)times_8) == 8), "");
    return (1 << (int)scale);
  }

 private:
  Register         _base;
  Register         _index;
  XMMRegister      _xmmindex;
  ScaleFactor      _scale;
  int              _disp;
  bool             _isxmmindex;
  RelocationHolder _rspec;

  // Easily misused constructors make them private
  // %%% can we make these go away?
  NOT_LP64(Address(address loc, RelocationHolder spec);)
  Address(int disp, address loc, relocInfo::relocType rtype);
  Address(int disp, address loc, RelocationHolder spec);

 public:

  int disp() { return _disp; }
  // creation
  Address()
    : _base(noreg),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(0),
      _isxmmindex(false) {
  }

  // No default displacement otherwise Register can be implicitly
  // converted to 0(Register) which is quite a different animal.

  Address(Register base, int disp)
    : _base(base),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(disp),
      _isxmmindex(false) {
  }

  Address(Register base, Register index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(index),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (disp),
      _isxmmindex(false) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
    : _base (base),
      _index(index.register_or_noreg()),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (disp + (index.constant_or_zero() * scale_size(scale))),
      _isxmmindex(false) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, XMMRegister index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(noreg),
      _xmmindex(index),
      _scale(scale),
      _disp(disp),
      _isxmmindex(true) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address plus_disp(int disp) const {
    Address a = (*this);
    a._disp += disp;
    return a;
  }
  Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
    Address a = (*this);
    a._disp += disp.constant_or_zero() * scale_size(scale);
    if (disp.is_register()) {
      assert(!a.index()->is_valid(), "competing indexes");
      a._index = disp.as_register();
      a._scale = scale;
    }
    return a;
  }
  bool is_same_address(Address a) const {
    // disregard _rspec
    return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
  }

  // The following two overloads are used in connection with the
  // ByteSize type (see sizes.hpp).
  // They simplify the use of
  // ByteSize'd arguments in assembly code. Note that their equivalent
  // for the optimized build are the member functions with int disp
  // argument since ByteSize is mapped to an int type in that case.
  //
  // Note: DO NOT introduce similar overloaded functions for WordSize
  // arguments as in the optimized mode, both ByteSize and WordSize
  // are mapped to the same type and thus the compiler cannot make a
  // distinction anymore (=> compiler errors).

#ifdef ASSERT
  Address(Register base, ByteSize disp)
    : _base(base),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(in_bytes(disp)),
      _isxmmindex(false) {
  }

  Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
    : _base(base),
      _index(index),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp(in_bytes(disp)),
      _isxmmindex(false) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
  Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
    : _base (base),
      _index(index.register_or_noreg()),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))),
      _isxmmindex(false) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

#endif // ASSERT

  // accessors
  bool        uses(Register reg) const { return _base == reg || _index == reg; }
  Register    base()             const { return _base; }
  Register    index()            const { return _index; }
  XMMRegister xmmindex()         const { return _xmmindex; }
  ScaleFactor scale()            const { return _scale; }
  int         disp()             const { return _disp; }
  bool        isxmmindex()       const { return _isxmmindex; }

  // Convert the raw encoding form into the form expected by the constructor for
  // Address. An index of 4 (rsp) corresponds to having no index, so convert
  // that to noreg for the Address constructor.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);

  static Address make_array(ArrayAddress);

 private:
  bool base_needs_rex() const {
    return _base != noreg && _base->encoding() >= 8;
  }

  bool index_needs_rex() const {
    return _index != noreg && _index->encoding() >= 8;
  }

  bool xmmindex_needs_rex() const {
    return _xmmindex != xnoreg && _xmmindex->encoding() >= 8;
  }

  relocInfo::relocType reloc() const { return _rspec.type(); }

  friend class Assembler;
  friend class MacroAssembler;
  friend class LIR_Assembler; // base/index/scale/disp
};

//
// AddressLiteral has been split out from Address because operands of this type
// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
// the few instructions that need to deal with address literals are unique and the
// MacroAssembler does not have to implement every instruction in the Assembler
// in order to search for address literals that may need special handling depending
// on the instruction and the platform. As a small step on the way to merging the
// i486/amd64 directories.
//
class AddressLiteral {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  // Typically when we use AddressLiterals we want their rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to
  // a register to reach it. Otherwise if near we can do rip
  // relative addressing.

  address          _target;

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:


  AddressLiteral(address target, relocInfo::relocType rtype);

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }


 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};

// Convenience classes
class RuntimeAddress: public AddressLiteral {

 public:

  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}

};

class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result.

class ArrayAddress {
 private:

  AddressLiteral _base;
  Address        _index;

 public:

  ArrayAddress() {};
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
  AddressLiteral base() { return _base; }
  Address index() { return _index; }

};

class InstructionAttr;

// 64-bit reflects the fxsave size, which is 512 bytes, plus the new xsave area on EVEX, which is another 2176 bytes.
// See the fxsave and xsave (EVEX enabled) documentation for the layout.
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);

// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.
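//
// Illustrative sketch (not part of the original header): the assembler is
// typically driven through a CodeBuffer, e.g.
//
//   CodeBuffer cb(...);   // some existing buffer
//   Assembler a(&cb);
//   a.movl(rax, 0);       // emitted literally as "mov eax, 0",
//   a.addl(rax, rbx);     // not rewritten into "xor eax, eax"
//
// Any instruction-selection tricks (xor-zeroing, inc/dec vs. add/sub, etc.)
// live in MacroAssembler, not here.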

class Assembler : public AbstractAssembler  {
  friend class AbstractAssembler; // for the non-virtual hack
  friend class LIR_Assembler; // as_Address()
  friend class StubGenerator;

 public:
  enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
    zero          = 0x4,
    notZero       = 0x5,
    equal         = 0x4,
    notEqual      = 0x5,
    less          = 0xc,
    lessEqual     = 0xe,
    greater       = 0xf,
    greaterEqual  = 0xd,
    below         = 0x2,
    belowEqual    = 0x6,
    above         = 0x7,
    aboveEqual    = 0x3,
    overflow      = 0x0,
    noOverflow    = 0x1,
    carrySet      = 0x2,
    carryClear    = 0x3,
    negative      = 0x8,
    positive      = 0x9,
    parity        = 0xa,
    noParity      = 0xb
  };

  enum Prefix {
    // segment overrides
    CS_segment = 0x2e,
    SS_segment = 0x36,
    DS_segment = 0x3e,
    ES_segment = 0x26,
    FS_segment = 0x64,
    GS_segment = 0x65,

    REX        = 0x40,

    REX_B      = 0x41,
    REX_X      = 0x42,
    REX_XB     = 0x43,
    REX_R      = 0x44,
    REX_RB     = 0x45,
    REX_RX     = 0x46,
    REX_RXB    = 0x47,

    REX_W      = 0x48,

    REX_WB     = 0x49,
    REX_WX     = 0x4A,
    REX_WXB    = 0x4B,
    REX_WR     = 0x4C,
    REX_WRB    = 0x4D,
    REX_WRX    = 0x4E,
    REX_WRXB   = 0x4F,

    VEX_3bytes = 0xC4,
    VEX_2bytes = 0xC5,
    EVEX_4bytes = 0x62,
    Prefix_EMPTY = 0x0
  };

  enum VexPrefix {
    VEX_B = 0x20,
    VEX_X = 0x40,
    VEX_R = 0x80,
    VEX_W = 0x80
  };

  enum ExexPrefix {
    EVEX_F  = 0x04,
    EVEX_V  = 0x08,
    EVEX_Rb = 0x10,
    EVEX_X  = 0x40,
    EVEX_Z  = 0x80
  };

  enum VexSimdPrefix {
    VEX_SIMD_NONE = 0x0,
    VEX_SIMD_66   = 0x1,
    VEX_SIMD_F3   = 0x2,
    VEX_SIMD_F2   = 0x3
  };

  enum VexOpcode {
    VEX_OPCODE_NONE  = 0x0,
    VEX_OPCODE_0F    = 0x1,
    VEX_OPCODE_0F_38 = 0x2,
    VEX_OPCODE_0F_3A = 0x3,
    VEX_OPCODE_MASK  = 0x1F
  };

  enum AvxVectorLen {
    AVX_128bit = 0x0,
    AVX_256bit = 0x1,
    AVX_512bit = 0x2,
    AVX_NoVec  = 0x4
  };

  enum EvexTupleType {
    EVEX_FV   = 0,
    EVEX_HV   = 4,
    EVEX_FVM  = 6,
    EVEX_T1S  = 7,
    EVEX_T1F  = 11,
    EVEX_T2   = 13,
    EVEX_T4   = 15,
    EVEX_T8   = 17,
    EVEX_HVM  = 18,
    EVEX_QVM  = 19,
    EVEX_OVM  = 20,
    EVEX_M128 = 21,
    EVEX_DUP  = 22,
    EVEX_ETUP = 23
  };

  enum EvexInputSizeInBits {
    EVEX_8bit  = 0,
    EVEX_16bit = 1,
    EVEX_32bit = 2,
    EVEX_64bit = 3,
    EVEX_NObit = 4
  };

  enum WhichOperand {
    // input to locate_operand, and format code for relocations
    imm_operand    = 0,          // embedded 32-bit|64-bit immediate operand
    disp32_operand = 1,          // embedded 32-bit displacement or address
    call32_operand = 2,          // embedded 32-bit self-relative displacement
#ifndef _LP64
    _WhichOperand_limit = 3
#else
    narrow_oop_operand  = 3,     // embedded 32-bit immediate narrow oop
    _WhichOperand_limit = 4
#endif
  };

  // Comparison predicates for integral types & FP types when using SSE
  enum ComparisonPredicate {
    eq     = 0,
    lt     = 1,
    le     = 2,
    _false = 3,
    neq    = 4,
    nlt    = 5,
    nle    = 6,
    _true  = 7
  };

  // Comparison predicates for FP types when using AVX
  // O means ordered. U is unordered. When using ordered, any NaN comparison is false. Otherwise, it is true.
  // S means signaling. Q means non-signaling. When signaling is true, instruction signals #IA on NaN.
  enum ComparisonPredicateFP {
    EQ_OQ    = 0,
    LT_OS    = 1,
    LE_OS    = 2,
    UNORD_Q  = 3,
    NEQ_UQ   = 4,
    NLT_US   = 5,
    NLE_US   = 6,
    ORD_Q    = 7,
    EQ_UQ    = 8,
    NGE_US   = 9,
    NGT_US   = 0xA,
    FALSE_OQ = 0xB,
    NEQ_OQ   = 0xC,
    GE_OS    = 0xD,
    GT_OS    = 0xE,
    TRUE_UQ  = 0xF,
    EQ_OS    = 0x10,
    LT_OQ    = 0x11,
    LE_OQ    = 0x12,
    UNORD_S  = 0x13,
    NEQ_US   = 0x14,
    NLT_UQ   = 0x15,
    NLE_UQ   = 0x16,
    ORD_S    = 0x17,
    EQ_US    = 0x18,
    NGE_UQ   = 0x19,
    NGT_UQ   = 0x1A,
    FALSE_OS = 0x1B,
    NEQ_OS   = 0x1C,
    GE_OQ    = 0x1D,
    GT_OQ    = 0x1E,
    TRUE_US  = 0x1F
  };


  // NOTE: The general philosophy of the declarations here is that 64bit versions
  // of instructions are freely declared without the need for wrapping them in an ifdef.
  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
  // In the .cpp file the implementations are wrapped so that they are dropped out
  // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
  // to the size it was prior to merging up the 32bit and 64bit assemblers.
  //
  // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
  // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.

 private:

  bool _legacy_mode_bw;
  bool _legacy_mode_dq;
  bool _legacy_mode_vl;
  bool _legacy_mode_vlbw;
  bool _is_managed;
  bool _vector_masking;    // For stub code use only

  class InstructionAttr *_attributes;

  // 64bit prefixes
  int prefix_and_encode(int reg_enc, bool byteinst = false);
  int prefixq_and_encode(int reg_enc);

  int prefix_and_encode(int dst_enc, int src_enc) {
    return prefix_and_encode(dst_enc, false, src_enc, false);
  }
  int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
  int prefixq_and_encode(int dst_enc, int src_enc);

  void prefix(Register reg);
  void prefix(Register dst, Register src, Prefix p);
  void prefix(Register dst, Address adr, Prefix p);
  void prefix(Address adr);
  void prefixq(Address adr);

  void prefix(Address adr, Register reg,  bool byteinst = false);
  void prefix(Address adr, XMMRegister reg);
  void prefixq(Address adr, Register reg);
  void prefixq(Address adr, XMMRegister reg);

  void prefetch_prefix(Address src);

  void rex_prefix(Address adr, XMMRegister xreg,
                  VexSimdPrefix pre, VexOpcode opc, bool rex_w);
  int  rex_prefix_and_encode(int dst_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc, bool rex_w);

  void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
                   int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void vex_prefix(Address adr, int nds_enc, int xreg_enc,
                  VexSimdPrefix pre, VexOpcode opc,
                  InstructionAttr *attributes);

  int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc,
                             InstructionAttr *attributes);

  void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                   VexOpcode opc, InstructionAttr *attributes);

  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                             VexOpcode opc, InstructionAttr *attributes);

  // Helper functions for groups of instructions
  void emit_arith_b(int op1, int op2, Register dst, int imm8);

  void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
  void emit_arith(int op1, int op2, Register dst, Register src);

  bool emit_compressed_disp_byte(int &disp);

  void emit_operand(Register reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec,
                    int rip_relative_correction = 0);

  void emit_operand(XMMRegister reg, Register base, XMMRegister index,
                    Address::ScaleFactor scale,
                    int disp, RelocationHolder const& rspec);

  void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);

  // operands that only take the original 32bit registers
  void emit_operand32(Register reg, Address adr);

  void emit_operand(XMMRegister reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec);

  void emit_operand(XMMRegister reg, Address adr);

  void emit_operand(MMXRegister reg, Address adr);

  // workaround gcc (3.2.1-7) bug
  void emit_operand(Address adr, MMXRegister reg);


  // Immediate-to-memory forms
  void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);

  void emit_farith(int b1, int b2, int i);


 protected:
#ifdef ASSERT
  void check_relocation(RelocationHolder const& rspec, int format);
#endif

  void emit_data(jint data, relocInfo::relocType    rtype, int format);
  void emit_data(jint data, RelocationHolder const& rspec, int format);
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

  bool reachable(AddressLiteral adr) NOT_LP64({ return true;});

  // These are all easily abused and hence protected

  // 32BIT ONLY SECTION
#ifndef _LP64
  // Make these disappear in 64bit mode since they would never be correct
  void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
  void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY

  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY

  void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
#else
  // 64BIT ONLY SECTION
  void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY

  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);

  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64

  // These are unique in that we are ensured by the caller that the 32bit
  // relative in these instructions will always be able to reach the potentially
  // 64bit address described by entry. Since they can take a 64bit address they
  // don't have the 32 suffix like the other instructions in this class.

  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);

  // "Avoid using directly" section.
  // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the next inc() and dec() methods directly. INC & DEC instructions
  // could cause a partial flag stall since they don't set the CF flag.
  // Use the MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.

  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);

  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);

  // New cpus require use of movsd and movss to avoid partial register stall
  // when loading from memory. But for old Opteron use movlpd instead of movsd.
  // The selection is done in MacroAssembler::movdbl() and movflt().

  // Move Scalar Single-Precision Floating-Point Values
  void movss(XMMRegister dst, Address src);
  void movss(XMMRegister dst, XMMRegister src);
  void movss(Address dst, XMMRegister src);

  // Move Scalar Double-Precision Floating-Point Values
  void movsd(XMMRegister dst, Address src);
  void movsd(XMMRegister dst, XMMRegister src);
  void movsd(Address dst, XMMRegister src);
  void movlpd(XMMRegister dst, Address src);

  // New cpus require use of movaps and movapd to avoid partial register stall
  // when moving between registers.
  void movaps(XMMRegister dst, XMMRegister src);
  void movapd(XMMRegister dst, XMMRegister src);

  // End avoid using directly


  // Instruction prefixes
  void prefix(Prefix p);

 public:

  // Creation
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    init_attributes();
  }

  // Decoding
  static address locate_operand(address inst, WhichOperand which);
  static address locate_next_instruction(address inst);

  // Utilities
  static bool is_polling_page_far() NOT_LP64({ return false;});
  static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                         int cur_tuple_type, int in_size_in_bits, int cur_encoding);

  // Generic instructions
  // Does 32bit or 64bit as needed for the platform.
  // In some sense these belong in macro assembler,
  // but there is no need for both varieties to exist.

  void init_attributes(void) {
    _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
    _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
    _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
    _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
    _is_managed = false;
    _vector_masking = false;
    _attributes = NULL;
  }

  void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
  void clear_attributes(void) { _attributes = NULL; }

  void set_managed(void) { _is_managed = true; }
  void clear_managed(void) { _is_managed = false; }
  bool is_managed(void) { return _is_managed; }

  // Following functions are for stub code use only
  void set_vector_masking(void) { _vector_masking = true; }
  void clear_vector_masking(void) { _vector_masking = false; }
  bool is_vector_masking(void) { return _vector_masking; }

  void lea(Register dst, Address src);

  void mov(Register dst, Register src);

  void pusha();
  void popa();

  void pushf();
  void popf();

  void push(int32_t imm32);

  void push(Register src);

  void pop(Register dst);

  // These are dummies to prevent surprise implicit conversions to Register
  void push(void* v);
  void pop(void* v);

  // These do register sized moves/scans
  void rep_mov();
  void rep_stos();
  void rep_stosb();
  void repne_scan();
#ifdef _LP64
  void repne_scanl();
#endif

  // Vanilla instructions in lexical order

  void adcl(Address dst, int32_t imm32);
  void adcl(Address dst, Register src);
  void adcl(Register dst, int32_t imm32);
  void adcl(Register dst, Address src);
  void adcl(Register dst, Register src);

  void adcq(Register dst, int32_t imm32);
  void adcq(Register dst, Address src);
  void adcq(Register dst, Register src);

  void addb(Address dst, int imm8);
  void addw(Register dst, Register src);
  void addw(Address dst, int imm16);

  void addl(Address dst, int32_t imm32);
  void addl(Address dst, Register src);
  void addl(Register dst, int32_t imm32);
  void addl(Register dst, Address src);
  void addl(Register dst, Register src);

  void addq(Address dst, int32_t imm32);
  void addq(Address dst, Register src);
  void addq(Register dst, int32_t imm32);
  void addq(Register dst, Address src);
  void addq(Register dst, Register src);

#ifdef _LP64
  // Add Unsigned Integers with Carry Flag
  void adcxq(Register dst, Register src);

  // Add Unsigned Integers with Overflow Flag
  void adoxq(Register dst, Register src);
#endif

  void addr_nop_4();
  void addr_nop_5();
  void addr_nop_7();
  void addr_nop_8();

  // Add Scalar Double-Precision Floating-Point Values
  void addsd(XMMRegister dst, Address src);
  void addsd(XMMRegister dst, XMMRegister src);

  // Add Scalar Single-Precision Floating-Point Values
  void addss(XMMRegister dst, Address src);
  void addss(XMMRegister dst, XMMRegister src);

  // AES instructions
  void aesdec(XMMRegister dst, Address src);
  void aesdec(XMMRegister dst, XMMRegister src);
  void aesdeclast(XMMRegister dst, Address src);
  void aesdeclast(XMMRegister dst, XMMRegister src);
  void aesenc(XMMRegister dst, Address src);
  void aesenc(XMMRegister dst, XMMRegister src);
  void aesenclast(XMMRegister dst, Address src);
  void aesenclast(XMMRegister dst, XMMRegister src);
  void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void andw(Register dst, Register src);

  void andl(Address dst, int32_t imm32);
  void andl(Register dst, int32_t imm32);
  void andl(Register dst, Address src);
  void andl(Register dst, Register src);

  void andq(Address dst, int32_t imm32);
  void andq(Register dst, int32_t imm32);
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);

  // BMI instructions
  void andnl(Register dst, Register src1, Register src2);
  void andnl(Register dst, Register src1, Address src2);
  void andnq(Register dst, Register src1, Register src2);
  void andnq(Register dst, Register src1, Address src2);

  void blsil(Register dst, Register src);
  void blsil(Register dst, Address src);
  void blsiq(Register dst, Register src);
  void blsiq(Register dst, Address src);

  void blsmskl(Register dst, Register src);
  void blsmskl(Register dst, Address src);
  void blsmskq(Register dst, Register src);
  void blsmskq(Register dst, Address src);

  void blsrl(Register dst, Register src);
  void blsrl(Register dst, Address src);
  void blsrq(Register dst, Register src);
  void blsrq(Register dst, Address src);

  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);

#ifdef _LP64
  void bsfq(Register dst, Register src);
  void bsrq(Register dst, Register src);
#endif

  void bswapl(Register reg);

  void bswapq(Register reg);

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register reg);  // push pc; pc <- reg
  void call(Address adr);   // push pc; pc <- adr

  void cdql();

  void cdqq();

  void cld();

  void clflush(Address adr);

  void cmovl(Condition cc, Register dst, Register src);
  void cmovl(Condition cc, Register dst, Address src);

  void cmovq(Condition cc, Register dst, Register src);
  void cmovq(Condition cc, Register dst, Address src);


  void cmpb(Address dst, int imm8);

  void cmpl(Address dst, int32_t imm32);

  void cmpl(Register dst, int32_t imm32);
  void cmpl(Register dst, Register src);
  void cmpl(Register dst, Address src);

  void cmpq(Address dst, int32_t imm32);
  void cmpq(Address dst, Register src);

  void cmpq(Register dst, int32_t imm32);
  void cmpq(Register dst, Register src);
  void cmpq(Register dst, Address src);

  // these are dummies used to catch attempting to convert NULL to Register
  void cmpl(Register dst, void* junk); // dummy
  void cmpq(Register dst, void* junk); // dummy

  void cmpw(Address dst, int imm16);

  void cmpxchg8 (Address adr);

  void cmpxchgb(Register reg, Address adr);
  void cmpxchgl(Register reg, Address adr);

  void cmpxchgq(Register reg, Address adr);

  // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void comisd(XMMRegister dst, Address src);
  void comisd(XMMRegister dst, XMMRegister src);

  // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void comiss(XMMRegister dst, Address src);
  void comiss(XMMRegister dst, XMMRegister src);

  // Identify processor type and features
  void cpuid();

  // CRC32C
  void crc32(Register crc, Register v, int8_t sizeInBytes);
  void crc32(Register crc, Address adr, int8_t sizeInBytes);

  // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
  void cvtsd2ss(XMMRegister dst, XMMRegister src);
  void cvtsd2ss(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
  void cvtsi2sdl(XMMRegister dst, Register src);
  void cvtsi2sdl(XMMRegister dst, Address src);
  void cvtsi2sdq(XMMRegister dst, Register src);
  void cvtsi2sdq(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
  void cvtsi2ssl(XMMRegister dst, Register src);
  void cvtsi2ssl(XMMRegister dst, Address src);
  void cvtsi2ssq(XMMRegister dst, Register src);
  void cvtsi2ssq(XMMRegister dst, Address src);

  // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
  void cvtdq2pd(XMMRegister dst, XMMRegister src);
  void vcvtdq2pd(XMMRegister dst, XMMRegister src, int vector_len);

  // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
  void cvtdq2ps(XMMRegister dst, XMMRegister src);
  void vcvtdq2ps(XMMRegister dst, XMMRegister src, int vector_len);

  // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
  void cvtss2sd(XMMRegister dst, XMMRegister src);
  void cvtss2sd(XMMRegister dst, Address src);

  // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
  void cvttsd2sil(Register dst, Address src);
  void cvttsd2sil(Register dst, XMMRegister src);
  void cvttsd2siq(Register dst, XMMRegister src);

  // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
  void cvttss2sil(Register dst, XMMRegister src);
  void cvttss2siq(Register dst, XMMRegister src);

  // Convert vector double to int
  void cvttpd2dq(XMMRegister dst, XMMRegister src);

  // Convert vector float and double
  void vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len);
  void evcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len);
  void vcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len);
  void evcvtpd2ps(XMMRegister dst, XMMRegister src, int vector_len);

  // Convert vector long to vector FP
  void evcvtqq2ps(XMMRegister dst, XMMRegister src, int vector_len);
  void evcvtqq2pd(XMMRegister dst, XMMRegister src, int vector_len);

  // Evex casts with truncation
  void evpmovwb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovdw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovdb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovqd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovqb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovqw(XMMRegister dst, XMMRegister src, int vector_len);

  // Abs of packed Integer values
  void pabsb(XMMRegister dst, XMMRegister src);
  void pabsw(XMMRegister dst, XMMRegister src);
  void pabsd(XMMRegister dst, XMMRegister src);
  void vpabsb(XMMRegister dst, XMMRegister src, int vector_len);
  void vpabsw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpabsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpabsb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpabsw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpabsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpabsq(XMMRegister dst, XMMRegister src, int vector_len);

  // Divide Scalar Double-Precision Floating-Point Values
  void divsd(XMMRegister dst, Address src);
  void divsd(XMMRegister dst, XMMRegister src);

  // Divide Scalar Single-Precision Floating-Point Values
  void divss(XMMRegister dst, Address src);
  void divss(XMMRegister dst, XMMRegister src);

  void emms();

  void fabs();

  void fadd(int i);

  void fadd_d(Address src);
  void fadd_s(Address src);

  // "Alternate" versions of x87 instructions place result down in FPU
  // stack instead of on TOS

  void fadda(int i); // "alternate" fadd
  void faddp(int i = 1);

  void fchs();

  void fcom(int i);

  void fcomp(int i = 1);
  void fcomp_d(Address src);
  void fcomp_s(Address src);

  void fcompp();

  void fcos();

  void fdecstp();

  void fdiv(int i);
  void fdiv_d(Address src);
  void fdivr_s(Address src);
  void fdiva(int i);  // "alternate" fdiv
  void fdivp(int i = 1);

  void fdivr(int i);
  void fdivr_d(Address src);
  void fdiv_s(Address src);

  void fdivra(int i); // "alternate" reversed fdiv

  void fdivrp(int i = 1);

  void ffree(int i = 0);

  void fild_d(Address adr);
  void fild_s(Address adr);

  void fincstp();

  void finit();

  void fist_s (Address adr);
  void fistp_d(Address adr);
  void fistp_s(Address adr);

  void fld1();

  void fld_d(Address adr);
  void fld_s(Address adr);
  void fld_s(int index);
  void fld_x(Address adr);  // extended-precision (80-bit) format

  void fldcw(Address src);

  void fldenv(Address src);

  void fldlg2();

  void fldln2();

  void fldz();

  void flog();
  void flog10();

  void fmul(int i);

  void fmul_d(Address src);
  void fmul_s(Address src);

  void fmula(int i);  // "alternate" fmul

  void fmulp(int i = 1);

  void fnsave(Address dst);

  void fnstcw(Address src);

  void fnstsw_ax();

  void fprem();
  void fprem1();

  void frstor(Address src);

  void fsin();

  void fsqrt();

  void fst_d(Address adr);
  void fst_s(Address adr);

  void fstp_d(Address adr);
  void fstp_d(int index);
  void fstp_s(Address adr);
  void fstp_x(Address adr); // extended-precision (80-bit) format

  void fsub(int i);
  void fsub_d(Address src);
  void fsub_s(Address src);

  void fsuba(int i);  // "alternate" fsub

  void fsubp(int i = 1);

  void fsubr(int i);
  void fsubr_d(Address src);
  void fsubr_s(Address src);

  void fsubra(int i); // "alternate" reversed fsub

  void fsubrp(int i = 1);

  void ftan();

  void ftst();

  void fucomi(int i = 1);
  void fucomip(int i = 1);

  void fwait();

  void fxch(int i = 1);

  void fxrstor(Address src);
  void xrstor(Address src);

  void fxsave(Address dst);
  void xsave(Address dst);

  void fyl2x();
  void frndint();
  void f2xm1();
  void fldl2e();

  void hlt();

  void idivl(Register src);
  void divl(Register src); // Unsigned division

#ifdef _LP64
  void idivq(Register src);
#endif

  void imull(Register src);
  void imull(Register dst, Register src);
  void imull(Register dst, Register src, int value);
  void imull(Register dst, Address src);

#ifdef _LP64
  void imulq(Register dst, Register src);
  void imulq(Register dst, Register src, int value);
  void imulq(Register dst, Address src);
#endif

  // jcc is the generic conditional branch generator, used both for branches
  // to run-time routines and for branches to labels. jcc
  // takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;      // unbound label
  // jcc(cc, L);   // forward branch to unbound label
  // bind(L);      // bind label to the current pc
  // jcc(cc, L);   // backward branch to bound label
  // bind(L);      // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void jcc(Condition cc, Label& L, bool maybe_short = true);

  // Conditional jump to an 8-bit offset to L.
  // WARNING: be very careful using this for forward jumps.  If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.

  // Use macro to record file and line number.
#define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__)

  void jccb_0(Condition cc, Label& L, const char* file, int line);

  void jmp(Address entry);    // pc <- entry

  // Label operations & relative jumps (PPUM Appendix D)
  void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L

  void jmp(Register entry); // pc <- entry

  // Unconditional 8-bit offset jump to L.
  // WARNING: be very careful using this for forward jumps.  If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.

  // Use macro to record file and line number.
#define jmpb(L) jmpb_0(L, __FILE__, __LINE__)

  void jmpb_0(Label& L, const char* file, int line);

  void ldmxcsr( Address src );

  void leal(Register dst, Address src);

  void leaq(Register dst, Address src);

  void lfence();

  void lock();

  void lzcntl(Register dst, Register src);

#ifdef _LP64
  void lzcntq(Register dst, Register src);
#endif

  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

  // Serializes memory and blows flags
  void membar(Membar_mask_bits order_constraint) {
    if (os::is_MP()) {
      // We only have to handle StoreLoad
      if (order_constraint & StoreLoad) {
        // All usable chips support "locked" instructions which suffice
        // as barriers, and are much faster than the alternative of
        // using cpuid instruction. We use here a locked add [esp-C],0.
        // This is conveniently otherwise a no-op except for blowing
        // flags, and introducing a false dependency on target memory
        // location.
        // We can't do anything with flags, but we can avoid
        // memory dependencies in the current method by locked-adding
        // somewhere else on the stack. Doing [esp+C] will collide with
        // something on stack in current method, hence we go for [esp-C].
        // It is convenient since it is almost always in data cache, for
        // any small C. We need to step back from SP to avoid data
        // dependencies with other things on below SP (callee-saves, for
        // example). Without a clear way to figure out the minimal safe
        // distance from SP, it makes sense to step back the complete
        // cache line, as this will also avoid possible second-order effects
        // with locked ops against the cache line. Our choice of offset
        // is bounded by x86 operand encoding, which should stay within
        // [-128; +127] to have the 8-byte displacement encoding.
        //
        // Any change to this code may need to revisit other places in
        // the code where this idiom is used, in particular the
        // orderAccess code.

        int offset = -VM_Version::L1_line_size();
        if (offset < -128) {
          offset = -128;
        }

        lock();
        addl(Address(rsp, offset), 0); // Assert the lock# signal here
      }
    }
  }

  void mfence();

  // Moves

  void mov64(Register dst, int64_t imm64);

  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);

  void movddup(XMMRegister dst, XMMRegister src);

  void kmovbl(KRegister dst, Register src);
  void kmovbl(Register dst, KRegister src);
  void kmovwl(KRegister dst, Register src);
  void kmovwl(KRegister dst, Address src);
  void kmovwl(Register dst, KRegister src);
  void kmovdl(KRegister dst, Register src);
  void kmovdl(Register dst, KRegister src);
  void kmovql(KRegister dst, KRegister src);
  void kmovql(Address dst, KRegister src);
  void kmovql(KRegister dst, Address src);
  void kmovql(KRegister dst, Register src);
  void kmovql(Register dst, KRegister src);

  void knotwl(KRegister dst, KRegister src);

  void kortestbl(KRegister dst, KRegister src);
  void kortestwl(KRegister dst, KRegister src);
  void kortestdl(KRegister dst, KRegister src);
  void kortestql(KRegister dst, KRegister src);

  void ktestq(KRegister src1, KRegister src2);
  void ktestd(KRegister src1, KRegister src2);

  void ktestql(KRegister dst, KRegister src);

  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);

  // Move Double Quadword
  void movdq(XMMRegister dst, Register src);
  void movdq(Register dst, XMMRegister src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src);
  void movdqa(XMMRegister dst, Address src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 256bit Vector
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 512bit Vector
  void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
  void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
  void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
  void evmovdqul(Address dst, XMMRegister src, int vector_len);
  void evmovdqul(XMMRegister dst, Address src, int vector_len);
  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
  void evmovdquq(Address dst, XMMRegister src, int vector_len);
  void evmovdquq(XMMRegister dst, Address src, int vector_len);
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);

  // Move lower 64bit to high 64bit in 128bit register
  void movlhps(XMMRegister dst, XMMRegister src);

  void movl(Register dst, int32_t imm32);
  void movl(Address dst, int32_t imm32);
  void movl(Register dst, Register src);
  void movl(Register dst, Address src);
  void movl(Address dst, Register src);

  // These dummies prevent using movl from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movl(Address dst, void* junk);
  void movl(Register dst, void* junk);

#ifdef _LP64
  void movq(Register dst, Register src);
  void movq(Register dst, Address src);
  void movq(Address dst, Register src);
#endif

  void movq(Address dst, MMXRegister src );
  void movq(MMXRegister dst, Address src );

#ifdef _LP64
  // These dummies prevent using movq from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movq(Address dst, void* dummy);
  void movq(Register dst, void* dummy);
#endif

  // Move Quadword
  void movq(Address dst, XMMRegister src);
  void movq(XMMRegister dst, Address src);
  void movq(Register dst, XMMRegister src);
  void movq(XMMRegister dst, Register src);

  void movsbl(Register dst, Address src);
  void movsbl(Register dst, Register src);

#ifdef _LP64
  void movsbq(Register dst, Address src);
  void movsbq(Register dst, Register src);

  // Move signed 32bit immediate to 64bit extending sign
  void movslq(Address dst, int32_t imm64);
  void movslq(Register dst, int32_t imm64);

  void movslq(Register dst, Address src);
  void movslq(Register dst, Register src);
  void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
#endif

  void movswl(Register dst, Address src);
  void movswl(Register dst, Register src);

#ifdef _LP64
  void movswq(Register dst, Address src);
  void movswq(Register dst, Register src);
#endif

  void movw(Address dst, int imm16);
  void movw(Register dst, Address src);
  void movw(Address dst, Register src);

  void movzbl(Register dst, Address src);
  void movzbl(Register dst, Register src);

#ifdef _LP64
  void movzbq(Register dst, Address src);
  void movzbq(Register dst, Register src);
#endif

  void movzwl(Register dst, Address src);
  void movzwl(Register dst, Register src);

#ifdef _LP64
  void movzwq(Register dst, Address src);
  void movzwq(Register dst, Register src);
#endif

  // Unsigned multiply with RAX destination register
  void mull(Address src);
  void mull(Register src);

#ifdef _LP64
  void mulq(Address src);
  void mulq(Register src);
  void mulxq(Register dst1, Register dst2, Register src);
#endif

  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  void nop(int i = 1);

  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  void orw(Register dst, Register src);

  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  void orb(Address dst, int imm8);

  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Permutations
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermq(XMMRegister dst, XMMRegister src, int imm8);
  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vpermd(XMMRegister dst, XMMRegister nds, Address src);
  void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void pause();

  // Undefined Instruction
  void ud2();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  void nop(int i = 1);

  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  void orw(Register dst, Register src);

  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  void orb(Address dst, int imm8);

  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Permutations
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermq(XMMRegister dst, XMMRegister src, int imm8);
  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vpermd(XMMRegister dst, XMMRegister nds, Address src);
  void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void vpermilps(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermpd(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void pause();

  // Undefined Instruction
  void ud2();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
  void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate of, int vector_len);
  void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);

  void pcmpeqw(XMMRegister dst, XMMRegister src);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void pcmpeqd(XMMRegister dst, XMMRegister src);
  void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void pcmpeqq(XMMRegister dst, XMMRegister src);
  void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pcmpgtq(XMMRegister dst, XMMRegister src);
  void vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void pmovmskb(Register dst, XMMRegister src);
  void vpmovmskb(Register dst, XMMRegister src);

  // SSE 4.1 extract
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);
  void pextrd(Address dst, XMMRegister src, int imm8);
  void pextrq(Address dst, XMMRegister src, int imm8);
  void pextrb(Register dst, XMMRegister src, int imm8);
  void pextrb(Address dst, XMMRegister src, int imm8);
  // SSE 2 extract
  void pextrw(Register dst, XMMRegister src, int imm8);
  void pextrw(Address dst, XMMRegister src, int imm8);

  // SSE 4.1 insert
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);
  void pinsrb(XMMRegister dst, Register src, int imm8);
  void pinsrd(XMMRegister dst, Address src, int imm8);
  void pinsrq(XMMRegister dst, Address src, int imm8);
  void pinsrb(XMMRegister dst, Address src, int imm8);
  void insertps(XMMRegister dst, XMMRegister src, int imm8);
  // SSE 2 insert
  void pinsrw(XMMRegister dst, Register src, int imm8);
  void pinsrw(XMMRegister dst, Address src, int imm8);

  // AVX insert
  void vpinsrd(XMMRegister dst, XMMRegister nds, Register src, int imm8);
  void vpinsrb(XMMRegister dst, XMMRegister nds, Register src, int imm8);
  void vpinsrq(XMMRegister dst, XMMRegister nds, Register src, int imm8);
  void vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8);
  void vinsertps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  // Zero extend moves
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);
  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void pmovzxdq(XMMRegister dst, XMMRegister src);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);

  // Sign extend moves
  void pmovsxbw(XMMRegister dst, XMMRegister src);
  void pmovsxbd(XMMRegister dst, XMMRegister src);
  void pmovsxbq(XMMRegister dst, XMMRegister src);
  void vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovsxwd(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovsxwq(XMMRegister dst, XMMRegister src, int vector_len);
  void vpmovsxdq(XMMRegister dst, XMMRegister src, int vector_len);

  void evpmovwb(Address dst, XMMRegister src, int vector_len);
  void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);

  void vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len);

  void evpmovdb(Address dst, XMMRegister src, int vector_len);

#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif

#ifdef _LP64
  void popq(Address dst);
#endif

  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);

  void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len);

#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif

  // Prefetches (SSE, SSE2, 3DNOW only)

  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);
  void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);
  void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);

  // Shuffle Packed Low Words
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode);

  // Shuffle floats and doubles
  void vpshufps(XMMRegister, XMMRegister, XMMRegister, int, int);
  void vpshufpd(XMMRegister, XMMRegister, XMMRegister, int, int);

  // Shuffle packed values at 128 bit granularity
  void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);

  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);
  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Vector compare
  void vptest(XMMRegister dst, XMMRegister src, int vector_len);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);

  // Interleave Low Quadwords
  void punpcklqdq(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif

  void pushq(Address src);

  void rcll(Register dst, int imm8);

  void rclq(Register dst, int imm8);

  void rcrq(Register dst, int imm8);

  void rcpps(XMMRegister dst, XMMRegister src);

  void rcpss(XMMRegister dst, XMMRegister src);

  void rdtsc();

  void ret(int imm16);

#ifdef _LP64
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
  void rorxd(Register dst, Register src, int imm8);
#endif

  void sahf();

  void sarl(Register dst, int imm8);
  void sarl(Register dst);

  void sarq(Register dst, int imm8);
  void sarq(Register dst);

  void sbbl(Address dst, int32_t imm32);
  void sbbl(Register dst, int32_t imm32);
  void sbbl(Register dst, Address src);
  void sbbl(Register dst, Register src);

  void sbbq(Address dst, int32_t imm32);
  void sbbq(Register dst, int32_t imm32);
  void sbbq(Register dst, Address src);
  void sbbq(Register dst, Register src);

  void setb(Condition cc, Register dst);

  void palignr(XMMRegister dst, XMMRegister src, int imm8);
  void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
  void evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);

  void pblendw(XMMRegister dst, XMMRegister src, int imm8);

  void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
  void sha1nexte(XMMRegister dst, XMMRegister src);
  void sha1msg1(XMMRegister dst, XMMRegister src);
  void sha1msg2(XMMRegister dst, XMMRegister src);
  // xmm0 is an implicit additional source for the following instruction.
  void sha256rnds2(XMMRegister dst, XMMRegister src);
  void sha256msg1(XMMRegister dst, XMMRegister src);
  void sha256msg2(XMMRegister dst, XMMRegister src);
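  // Editorial sketch (register roles illustrative, not from the JDK sources): a
  // SHA-256 stub is expected to stage the pre-added message/constant words in xmm0
  // before each round step, since sha256rnds2 reads xmm0 implicitly, roughly:
  //
  //   __ movdqu(xmm0, msg_plus_k);      // implicit third operand of sha256rnds2
  //   __ sha256rnds2(state1, state0);   // two rounds using xmm0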
  void shldl(Register dst, Register src);
  void shldl(Register dst, Register src, int8_t imm8);

  void shll(Register dst, int imm8);
  void shll(Register dst);

  void shlq(Register dst, int imm8);
  void shlq(Register dst);

  void shrdl(Register dst, Register src);

  void shrl(Register dst, int imm8);
  void shrl(Register dst);

  void shrq(Register dst, int imm8);
  void shrq(Register dst);

  void smovl(); // QQQ generic?

  // Compute Square Root of Scalar Double-Precision Floating-Point Value
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);

  void std();

  void stmxcsr(Address dst);

  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);

  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);

  // Force generation of a 4 byte immediate value even if it fits into 8 bits
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);
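  // Editorial note (sketch, not from the original header): the *_imm32 forms are
  // useful when the emitted instruction's length must not depend on the value of
  // the immediate, for example at sites that may later be patched in place; the
  // plain subl/subq would shrink small immediates to the sign-extended 8-bit encoding.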
  // Subtract Scalar Double-Precision Floating-Point Values
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);

  // Subtract Scalar Single-Precision Floating-Point Values
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);

  void testb(Register dst, int imm8);
  void testb(Address dst, int imm8);

  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);

  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);
  void testq(Register dst, Address src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);

  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);

  // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);

  void xabort(int8_t imm8);

  void xaddb(Address dst, Register src);
  void xaddw(Address dst, Register src);
  void xaddl(Address dst, Register src);
  void xaddq(Address dst, Register src);

  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);

  void xchgb(Register reg, Address adr);
  void xchgw(Register reg, Address adr);
  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);

  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);

  void xend();

  // Get Value of Extended Control Register
  void xgetbv();

  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);

  void xorb(Register dst, Address src);
  void xorw(Register dst, Register src);

  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);

  void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0

  // AVX 3-operand scalar instructions (encoded with VEX prefix)

  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vfmadd231sd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vfmadd231ss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);

  void shlxl(Register dst, Register src1, Register src2);
  void shlxq(Register dst, Register src1, Register src2);

  //====================VECTOR ARITHMETIC=====================================

  // Add Packed Floating-Point Values
  void addpd(XMMRegister dst, XMMRegister src);
  void addpd(XMMRegister dst, Address src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Subtract Packed Floating-Point Values
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply Packed Floating-Point Values
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulpd(XMMRegister dst, Address src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vfmadd231ps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
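  // Editorial note (sketch, not from the original header): for the vfmadd231xx
  // forms above, scalar and packed alike, the '231' suffix names the operand
  // roles, i.e. dst = nds * src + dst, so the destination also serves as the addend.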
  // Divide Packed Floating-Point Values
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sqrt Packed Floating-Point Values
  void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtpd(XMMRegister dst, Address src, int vector_len);
  void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtps(XMMRegister dst, Address src, int vector_len);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void unpckhpd(XMMRegister dst, XMMRegister src);
  void unpcklpd(XMMRegister dst, XMMRegister src);

  // Bitwise Logical XOR of Packed Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Add horizontal packed integers
  void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void phaddw(XMMRegister dst, XMMRegister src);
  void phaddd(XMMRegister dst, XMMRegister src);

  // Add packed integers
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, Address src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sub packed integers
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply packed integers (only shorts and ints)
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void pmuludq(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Minimum of packed integers
  void pminsb(XMMRegister dst, XMMRegister src);
  void vpminsb(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void pminsw(XMMRegister dst, XMMRegister src);
  void vpminsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void pminsd(XMMRegister dst, XMMRegister src);
  void vpminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void vpminsq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void minps(XMMRegister dst, XMMRegister src);
  void vminps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void minpd(XMMRegister dst, XMMRegister src);
  void vminpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);

  // Maximum of packed integers
  void pmaxsb(XMMRegister dst, XMMRegister src);
  void vpmaxsb(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void pmaxsw(XMMRegister dst, XMMRegister src);
  void vpmaxsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void pmaxsd(XMMRegister dst, XMMRegister src);
  void vpmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void vpmaxsq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void maxps(XMMRegister dst, XMMRegister src);
  void vmaxps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
  void maxpd(XMMRegister dst, XMMRegister src);
  void vmaxpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);

  // Shift left packed integers
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  // Logical shift right packed integers
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable shift left packed integers
  void vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable shift right packed integers
  void vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Variable shift right arithmetic packed integers
  void vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void evpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // And packed integers
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Andn packed integers
  void pandn(XMMRegister dst, XMMRegister src);

  // Or packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  // Xor packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // vinserti forms
  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);

  // vinsertf forms
  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
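  // Editorial note (sketch, not from the original header): for the 128-bit
  // insert/extract forms, imm8 selects the 128-bit lane, e.g.
  //   vinserti128(dst, nds, src, 1)   copies nds and replaces its upper lane with src
  //   vextracti128(dst, src, 0)       extracts the lower lane
  // The 32x4/64x2/64x4 forms select lanes the same way at EVEX vector lengths.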
  // vextracti forms
  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);

  // vextractf forms
  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);

  // legacy xmm sourced word/dword replicate
  void vpbroadcastw(XMMRegister dst, XMMRegister src);
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // xmm/mem sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);

  void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
  void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
  // scalar single/double precision replicate
  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);

  // gpr sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Register src, int vector_len);

  void evpgatherdd(XMMRegister dst, KRegister k1, Address src, int vector_len);

  // Carry-Less Multiplication Quadword
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
  void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);

  // AVX instruction which is used to clear the upper 128 bits of YMM registers and
  // so avoid the transition penalty between the AVX and SSE states. There is no
  // penalty if legacy SSE instructions are encoded using the VEX prefix because
  // they always clear the upper 128 bits. It should be used before calling
  // runtime code and native libraries.
  void vzeroupper();
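  // A hedged usage sketch (the call site below is illustrative, not taken from the
  // JDK sources): a stub that has executed 256-bit AVX code typically emits
  // vzeroupper before branching into code that may use legacy-encoded SSE, e.g.
  //
  //   __ vzeroupper();
  //   __ call(RuntimeAddress(some_native_entry));  // some_native_entry is hypothetical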
  // Vector double compares
  void vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
  void evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               ComparisonPredicateFP comparison, int vector_len);

  // Vector float compares
  void vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len);
  void evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               ComparisonPredicateFP comparison, int vector_len);

  // Vector integer compares
  void vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, int vector_len);
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
               int comparison, int vector_len);

  // Vector long compares
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, int vector_len);
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
               int comparison, int vector_len);

  // Vector byte compares
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, int vector_len);
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
               int comparison, int vector_len);

  // Vector short compares
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, int vector_len);
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
               int comparison, int vector_len);

  // Vector blends
  void blendvps(XMMRegister dst, XMMRegister src);
  void blendvpd(XMMRegister dst, XMMRegister src);
  void pblendvb(XMMRegister dst, XMMRegister src);
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
  void vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len);
  void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
  void evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpblendmb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpblendmw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpblendmd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpblendmq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);

 protected:
  // The following instructions require 16-byte alignment of their memory operand
  // in SSE mode. They should be called only from the corresponding MacroAssembler
  // instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);

};

// The Intel x86/AMD64 assembler attributes: all fields enclosed here guide
// encoding-level decisions. The specific setter functions are for specialized use;
// otherwise the defaults, or whatever was supplied at object construction, apply.
class InstructionAttr {
public:
  InstructionAttr(
    int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
    bool rex_vex_w,     // Width of data: false for 32 bits or less, true for 64 bits or specially defined widths
    bool legacy_mode,   // When true, the instruction is restricted to AVX (or earlier) encodings; when false, EVEX may be chosen
    bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen, else k1 is used under the same condition
    bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
    :
    _avx_vector_len(vector_len),
    _rex_vex_w(rex_vex_w),
    _rex_vex_w_reverted(false),
    _legacy_mode(legacy_mode),
    _no_reg_mask(no_reg_mask),
    _uses_vl(uses_vl),
    _tuple_type(Assembler::EVEX_ETUP),
    _input_size_in_bits(Assembler::EVEX_NObit),
    _is_evex_instruction(false),
    _evex_encoding(0),
    _is_clear_context(true),
    _is_extended_context(false),
    _embedded_opmask_register_specifier(1), // hard-code k1 for now; it can be changed later
    _current_assembler(NULL) {
    if (UseAVX < 3) _legacy_mode = true;
  }

  ~InstructionAttr() {
    if (_current_assembler != NULL) {
      _current_assembler->clear_attributes();
    }
    _current_assembler = NULL;
  }
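  // A hedged usage sketch (the call site below is illustrative, not taken from the
  // JDK sources): emitters inside the Assembler typically build one of these on the
  // stack and hand it to the prefix/encode helpers, roughly:
  //
  //   InstructionAttr attributes(AVX_128bit, /* rex_vex_w */ false,
  //                              /* legacy_mode */ false, /* no_mask_reg */ true,
  //                              /* uses_vl */ false);
  //   attributes.set_is_evex_instruction();
  //   // ... then pass &attributes to the vex/evex prefix-and-encode helper.
  //
  // The exact helper names and flag choices above are illustrative only.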
private:
  int  _avx_vector_len;
  bool _rex_vex_w;
  bool _rex_vex_w_reverted;
  bool _legacy_mode;
  bool _no_reg_mask;
  bool _uses_vl;
  int  _tuple_type;
  int  _input_size_in_bits;
  bool _is_evex_instruction;
  int  _evex_encoding;
  bool _is_clear_context;
  bool _is_extended_context;
  int  _embedded_opmask_register_specifier;

  Assembler *_current_assembler;

public:
  // query functions for field accessors
  int  get_vector_len(void) const { return _avx_vector_len; }
  bool is_rex_vex_w(void) const { return _rex_vex_w; }
  bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
  bool is_legacy_mode(void) const { return _legacy_mode; }
  bool is_no_reg_mask(void) const { return _no_reg_mask; }
  bool uses_vl(void) const { return _uses_vl; }
  int  get_tuple_type(void) const { return _tuple_type; }
  int  get_input_size(void) const { return _input_size_in_bits; }
  bool is_evex_instruction(void) const { return _is_evex_instruction; }
  int  get_evex_encoding(void) const { return _evex_encoding; }
  bool is_clear_context(void) const { return _is_clear_context; }
  bool is_extended_context(void) const { return _is_extended_context; }
  int  get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }

  // Set the vector len manually
  void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }

  // Set revert rex_vex_w for avx encoding
  void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }

  // Set rex_vex_w based on state
  void set_rex_vex_w(bool state) { _rex_vex_w = state; }

  // Set the instruction to be encoded in AVX mode
  void set_is_legacy_mode(void) { _legacy_mode = true; }

  // Set the current instruction to be encoded as an EVEX instruction
  void set_is_evex_instruction(void) { _is_evex_instruction = true; }

  // Internal encoding data used in compressed immediate offset programming
  void set_evex_encoding(int value) { _evex_encoding = value; }

  // When the EVEX.z field is set (true), elements of the destination XMM/YMM/ZMM
  // register that are not selected by the opmask are zeroed. This method unsets it
  // so that merge semantics are used instead.
  void reset_is_clear_context(void) { _is_clear_context = false; }

  // Map back to the current assembler so that we can manage object-level association
  void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }

  // Address modifiers used for compressed displacement calculation
  void set_address_attributes(int tuple_type, int input_size_in_bits) {
    if (VM_Version::supports_evex()) {
      _tuple_type = tuple_type;
      _input_size_in_bits = input_size_in_bits;
    }
  }

  // Set embedded opmask register specifier.
  void set_embedded_opmask_register_specifier(KRegister mask) {
    _embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
  }

};

#endif // CPU_X86_VM_ASSEMBLER_X86_HPP