1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_ASSEMBLER_X86_HPP 26 #define CPU_X86_ASSEMBLER_X86_HPP 27 28 #include "asm/register.hpp" 29 #include "runtime/vm_version.hpp" 30 #include "utilities/powerOfTwo.hpp" 31 32 class BiasedLockingCounters; 33 34 // Contains all the definitions needed for x86 assembly code generation. 35 36 // Calling convention 37 class Argument { 38 public: 39 enum { 40 #ifdef _LP64 41 #ifdef _WIN64 42 n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 43 n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) 44 #else 45 n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 46 n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) 47 #endif // _WIN64 48 n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... 49 n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... 50 #else 51 n_register_parameters = 0 // 0 registers used to pass arguments 52 #endif // _LP64 53 }; 54 }; 55 56 57 #ifdef _LP64 58 // Symbolically name the register arguments used by the c calling convention. 59 // Windows is different from linux/solaris. So much for standards... 60 61 #ifdef _WIN64 62 63 REGISTER_DECLARATION(Register, c_rarg0, rcx); 64 REGISTER_DECLARATION(Register, c_rarg1, rdx); 65 REGISTER_DECLARATION(Register, c_rarg2, r8); 66 REGISTER_DECLARATION(Register, c_rarg3, r9); 67 68 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 69 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 70 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 71 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 72 73 #else 74 75 REGISTER_DECLARATION(Register, c_rarg0, rdi); 76 REGISTER_DECLARATION(Register, c_rarg1, rsi); 77 REGISTER_DECLARATION(Register, c_rarg2, rdx); 78 REGISTER_DECLARATION(Register, c_rarg3, rcx); 79 REGISTER_DECLARATION(Register, c_rarg4, r8); 80 REGISTER_DECLARATION(Register, c_rarg5, r9); 81 82 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 83 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 84 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 85 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 86 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); 87 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); 88 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); 89 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); 90 91 #endif // _WIN64 92 93 // Symbolically name the register arguments used by the Java calling convention. 
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do any shuffling. A suitable jni method is non-static and takes a
// small number of arguments (two fewer args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0   c_rarg1  c_rarg2 c_rarg3 c_rarg4 c_rarg5    |
// |-------------------------------------------------------|
// | rcx       rdx      r8      r9      rdi*    rsi*       | windows (* not a c_rarg)
// | rdi       rsi      rdx     rcx     r8      r9         | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5   j_rarg0  j_rarg1 j_rarg2 j_rarg3 j_rarg4    |
// |-------------------------------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);

REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);

REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile

REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved

#else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that, if the dead code is incorrectly live and executed,
// it will cause an assertion failure.
#define rscratch1  noreg
#define rscratch2  noreg

#endif // _LP64

// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);

// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
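//
// Illustrative forms only (the constructors are declared in the Address class
// below; register names are the usual Register constants):
//
//   Address(rbp, -8)                         // [rbp - 8]
//   Address(rbx, rcx, Address::times_8)      // [rbx + rcx*8]
//   Address(rsi, rdx, Address::times_4, 16)  // [rsi + rdx*4 + 16]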
156 157 class ArrayAddress; 158 159 class Address { 160 public: 161 enum ScaleFactor { 162 no_scale = -1, 163 times_1 = 0, 164 times_2 = 1, 165 times_4 = 2, 166 times_8 = 3, 167 times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) 168 }; 169 static ScaleFactor times(int size) { 170 assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); 171 if (size == 8) return times_8; 172 if (size == 4) return times_4; 173 if (size == 2) return times_2; 174 return times_1; 175 } 176 static int scale_size(ScaleFactor scale) { 177 assert(scale != no_scale, ""); 178 assert(((1 << (int)times_1) == 1 && 179 (1 << (int)times_2) == 2 && 180 (1 << (int)times_4) == 4 && 181 (1 << (int)times_8) == 8), ""); 182 return (1 << (int)scale); 183 } 184 185 private: 186 Register _base; 187 Register _index; 188 XMMRegister _xmmindex; 189 ScaleFactor _scale; 190 int _disp; 191 bool _isxmmindex; 192 RelocationHolder _rspec; 193 194 // Easily misused constructors make them private 195 // %%% can we make these go away? 196 NOT_LP64(Address(address loc, RelocationHolder spec);) 197 Address(int disp, address loc, relocInfo::relocType rtype); 198 Address(int disp, address loc, RelocationHolder spec); 199 200 public: 201 202 int disp() { return _disp; } 203 // creation 204 Address() 205 : _base(noreg), 206 _index(noreg), 207 _xmmindex(xnoreg), 208 _scale(no_scale), 209 _disp(0), 210 _isxmmindex(false){ 211 } 212 213 // No default displacement otherwise Register can be implicitly 214 // converted to 0(Register) which is quite a different animal. 215 216 Address(Register base, int disp) 217 : _base(base), 218 _index(noreg), 219 _xmmindex(xnoreg), 220 _scale(no_scale), 221 _disp(disp), 222 _isxmmindex(false){ 223 } 224 225 Address(Register base, Register index, ScaleFactor scale, int disp = 0) 226 : _base (base), 227 _index(index), 228 _xmmindex(xnoreg), 229 _scale(scale), 230 _disp (disp), 231 _isxmmindex(false) { 232 assert(!index->is_valid() == (scale == Address::no_scale), 233 "inconsistent address"); 234 } 235 236 Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0) 237 : _base (base), 238 _index(index.register_or_noreg()), 239 _xmmindex(xnoreg), 240 _scale(scale), 241 _disp (disp + (index.constant_or_zero() * scale_size(scale))), 242 _isxmmindex(false){ 243 if (!index.is_register()) scale = Address::no_scale; 244 assert(!_index->is_valid() == (scale == Address::no_scale), 245 "inconsistent address"); 246 } 247 248 Address(Register base, XMMRegister index, ScaleFactor scale, int disp = 0) 249 : _base (base), 250 _index(noreg), 251 _xmmindex(index), 252 _scale(scale), 253 _disp(disp), 254 _isxmmindex(true) { 255 assert(!index->is_valid() == (scale == Address::no_scale), 256 "inconsistent address"); 257 } 258 259 Address plus_disp(int disp) const { 260 Address a = (*this); 261 a._disp += disp; 262 return a; 263 } 264 Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { 265 Address a = (*this); 266 a._disp += disp.constant_or_zero() * scale_size(scale); 267 if (disp.is_register()) { 268 assert(!a.index()->is_valid(), "competing indexes"); 269 a._index = disp.as_register(); 270 a._scale = scale; 271 } 272 return a; 273 } 274 bool is_same_address(Address a) const { 275 // disregard _rspec 276 return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; 277 } 278 279 // The following two overloads are used in connection with the 280 // ByteSize type (see sizes.hpp). 
They simplify the use of 281 // ByteSize'd arguments in assembly code. Note that their equivalent 282 // for the optimized build are the member functions with int disp 283 // argument since ByteSize is mapped to an int type in that case. 284 // 285 // Note: DO NOT introduce similar overloaded functions for WordSize 286 // arguments as in the optimized mode, both ByteSize and WordSize 287 // are mapped to the same type and thus the compiler cannot make a 288 // distinction anymore (=> compiler errors). 289 290 #ifdef ASSERT 291 Address(Register base, ByteSize disp) 292 : _base(base), 293 _index(noreg), 294 _xmmindex(xnoreg), 295 _scale(no_scale), 296 _disp(in_bytes(disp)), 297 _isxmmindex(false){ 298 } 299 300 Address(Register base, Register index, ScaleFactor scale, ByteSize disp) 301 : _base(base), 302 _index(index), 303 _xmmindex(xnoreg), 304 _scale(scale), 305 _disp(in_bytes(disp)), 306 _isxmmindex(false){ 307 assert(!index->is_valid() == (scale == Address::no_scale), 308 "inconsistent address"); 309 } 310 Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp) 311 : _base (base), 312 _index(index.register_or_noreg()), 313 _xmmindex(xnoreg), 314 _scale(scale), 315 _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))), 316 _isxmmindex(false) { 317 if (!index.is_register()) scale = Address::no_scale; 318 assert(!_index->is_valid() == (scale == Address::no_scale), 319 "inconsistent address"); 320 } 321 322 #endif // ASSERT 323 324 // accessors 325 bool uses(Register reg) const { return _base == reg || _index == reg; } 326 Register base() const { return _base; } 327 Register index() const { return _index; } 328 XMMRegister xmmindex() const { return _xmmindex; } 329 ScaleFactor scale() const { return _scale; } 330 int disp() const { return _disp; } 331 bool isxmmindex() const { return _isxmmindex; } 332 333 // Convert the raw encoding form into the form expected by the constructor for 334 // Address. An index of 4 (rsp) corresponds to having no index, so convert 335 // that to noreg for the Address constructor. 336 static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc); 337 338 static Address make_array(ArrayAddress); 339 340 private: 341 bool base_needs_rex() const { 342 return _base->is_valid() && _base->encoding() >= 8; 343 } 344 345 bool index_needs_rex() const { 346 return _index->is_valid() &&_index->encoding() >= 8; 347 } 348 349 bool xmmindex_needs_rex() const { 350 return _xmmindex->is_valid() && _xmmindex->encoding() >= 8; 351 } 352 353 relocInfo::relocType reloc() const { return _rspec.type(); } 354 355 friend class Assembler; 356 friend class MacroAssembler; 357 friend class LIR_Assembler; // base/index/scale/disp 358 }; 359 360 // 361 // AddressLiteral has been split out from Address because operands of this type 362 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out 363 // the few instructions that need to deal with address literals are unique and the 364 // MacroAssembler does not have to implement every instruction in the Assembler 365 // in order to search for address literals that may need special handling depending 366 // on the instruction and the platform. As small step on the way to merging i486/amd64 367 // directories. 
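//
// Illustrative use only (ExternalAddress is one of the convenience wrappers
// declared below; 'flag_addr' is a placeholder):
//
//   ExternalAddress flag(flag_addr);   // as an rval: the word at flag_addr
//   flag.addr()                        // as an lval: the address itself
//
// so that the few literal-aware instructions (and the MacroAssembler) can pick
// rip-relative addressing when the target is in reach, or materialize the
// address in a register when it is not.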
//
class AddressLiteral {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  // Typically, when we use AddressLiterals, we want their rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to
  // a register to reach it. Otherwise if near we can do rip
  // relative addressing.

  address _target;

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:

  AddressLiteral(address target, relocInfo::relocType rtype);

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }

 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};

// Convenience classes
class RuntimeAddress: public AddressLiteral {

 public:

  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}

};

class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

// 32-bit x86 can do array addressing as a single operation since disp can be an
// absolute address; amd64 can't. We create a class that expresses the concept but
// does extra magic on amd64 to get the final result.

class ArrayAddress {
 private:

  AddressLiteral _base;
  Address        _index;

 public:

  ArrayAddress() {};
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
  AddressLiteral base() { return _base; }
  Address index() { return _index; }

};

class InstructionAttr;

// 64-bit reflects the fxsave size, which is 512 bytes, plus the new xsave area on
// EVEX, which is another 2176 bytes. See the fxsave and xsave (EVEX enabled)
// documentation for the layout.
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);

// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler generates code into a CodeBuffer.
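//
// Typical use, for illustration only ('cbuf' stands for some existing CodeBuffer):
//
//   Assembler _masm(&cbuf);
//   _masm.movl(rax, 0);        // emitted literally; not rewritten to xor
//   _masm.addl(rax, rbx);
//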
483 484 class Assembler : public AbstractAssembler { 485 friend class AbstractAssembler; // for the non-virtual hack 486 friend class LIR_Assembler; // as_Address() 487 friend class StubGenerator; 488 489 public: 490 enum Condition { // The x86 condition codes used for conditional jumps/moves. 491 zero = 0x4, 492 notZero = 0x5, 493 equal = 0x4, 494 notEqual = 0x5, 495 less = 0xc, 496 lessEqual = 0xe, 497 greater = 0xf, 498 greaterEqual = 0xd, 499 below = 0x2, 500 belowEqual = 0x6, 501 above = 0x7, 502 aboveEqual = 0x3, 503 overflow = 0x0, 504 noOverflow = 0x1, 505 carrySet = 0x2, 506 carryClear = 0x3, 507 negative = 0x8, 508 positive = 0x9, 509 parity = 0xa, 510 noParity = 0xb 511 }; 512 513 enum Prefix { 514 // segment overrides 515 CS_segment = 0x2e, 516 SS_segment = 0x36, 517 DS_segment = 0x3e, 518 ES_segment = 0x26, 519 FS_segment = 0x64, 520 GS_segment = 0x65, 521 522 REX = 0x40, 523 524 REX_B = 0x41, 525 REX_X = 0x42, 526 REX_XB = 0x43, 527 REX_R = 0x44, 528 REX_RB = 0x45, 529 REX_RX = 0x46, 530 REX_RXB = 0x47, 531 532 REX_W = 0x48, 533 534 REX_WB = 0x49, 535 REX_WX = 0x4A, 536 REX_WXB = 0x4B, 537 REX_WR = 0x4C, 538 REX_WRB = 0x4D, 539 REX_WRX = 0x4E, 540 REX_WRXB = 0x4F, 541 542 VEX_3bytes = 0xC4, 543 VEX_2bytes = 0xC5, 544 EVEX_4bytes = 0x62, 545 Prefix_EMPTY = 0x0 546 }; 547 548 enum VexPrefix { 549 VEX_B = 0x20, 550 VEX_X = 0x40, 551 VEX_R = 0x80, 552 VEX_W = 0x80 553 }; 554 555 enum ExexPrefix { 556 EVEX_F = 0x04, 557 EVEX_V = 0x08, 558 EVEX_Rb = 0x10, 559 EVEX_X = 0x40, 560 EVEX_Z = 0x80 561 }; 562 563 enum VexSimdPrefix { 564 VEX_SIMD_NONE = 0x0, 565 VEX_SIMD_66 = 0x1, 566 VEX_SIMD_F3 = 0x2, 567 VEX_SIMD_F2 = 0x3 568 }; 569 570 enum VexOpcode { 571 VEX_OPCODE_NONE = 0x0, 572 VEX_OPCODE_0F = 0x1, 573 VEX_OPCODE_0F_38 = 0x2, 574 VEX_OPCODE_0F_3A = 0x3, 575 VEX_OPCODE_MASK = 0x1F 576 }; 577 578 enum AvxVectorLen { 579 AVX_128bit = 0x0, 580 AVX_256bit = 0x1, 581 AVX_512bit = 0x2, 582 AVX_NoVec = 0x4 583 }; 584 585 enum EvexTupleType { 586 EVEX_FV = 0, 587 EVEX_HV = 4, 588 EVEX_FVM = 6, 589 EVEX_T1S = 7, 590 EVEX_T1F = 11, 591 EVEX_T2 = 13, 592 EVEX_T4 = 15, 593 EVEX_T8 = 17, 594 EVEX_HVM = 18, 595 EVEX_QVM = 19, 596 EVEX_OVM = 20, 597 EVEX_M128 = 21, 598 EVEX_DUP = 22, 599 EVEX_ETUP = 23 600 }; 601 602 enum EvexInputSizeInBits { 603 EVEX_8bit = 0, 604 EVEX_16bit = 1, 605 EVEX_32bit = 2, 606 EVEX_64bit = 3, 607 EVEX_NObit = 4 608 }; 609 610 enum WhichOperand { 611 // input to locate_operand, and format code for relocations 612 imm_operand = 0, // embedded 32-bit|64-bit immediate operand 613 disp32_operand = 1, // embedded 32-bit displacement or address 614 call32_operand = 2, // embedded 32-bit self-relative displacement 615 #ifndef _LP64 616 _WhichOperand_limit = 3 617 #else 618 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop 619 _WhichOperand_limit = 4 620 #endif 621 }; 622 623 enum ComparisonPredicate { 624 eq = 0, 625 lt = 1, 626 le = 2, 627 _false = 3, 628 neq = 4, 629 nlt = 5, 630 nle = 6, 631 _true = 7 632 }; 633 634 //---< calculate length of instruction >--- 635 // As instruction size can't be found out easily on x86/x64, 636 // we just use '4' for len and maxlen. 637 // instruction must start at passed address 638 static unsigned int instr_len(unsigned char *instr) { return 4; } 639 640 //---< longest instructions >--- 641 // Max instruction length is not specified in architecture documentation. 642 // We could use a "safe enough" estimate (15), but just default to 643 // instruction length guess from above. 
644 static unsigned int instr_maxlen() { return 4; } 645 646 // NOTE: The general philopsophy of the declarations here is that 64bit versions 647 // of instructions are freely declared without the need for wrapping them an ifdef. 648 // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) 649 // In the .cpp file the implementations are wrapped so that they are dropped out 650 // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL 651 // to the size it was prior to merging up the 32bit and 64bit assemblers. 652 // 653 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction 654 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. 655 656 private: 657 658 bool _legacy_mode_bw; 659 bool _legacy_mode_dq; 660 bool _legacy_mode_vl; 661 bool _legacy_mode_vlbw; 662 NOT_LP64(bool _is_managed;) 663 664 class InstructionAttr *_attributes; 665 666 // 64bit prefixes 667 void prefix(Register reg); 668 void prefix(Register dst, Register src, Prefix p); 669 void prefix(Register dst, Address adr, Prefix p); 670 671 void prefix(Address adr); 672 void prefix(Address adr, Register reg, bool byteinst = false); 673 void prefix(Address adr, XMMRegister reg); 674 675 int prefix_and_encode(int reg_enc, bool byteinst = false); 676 int prefix_and_encode(int dst_enc, int src_enc) { 677 return prefix_and_encode(dst_enc, false, src_enc, false); 678 } 679 int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte); 680 681 // Some prefixq variants always emit exactly one prefix byte, so besides a 682 // prefix-emitting method we provide a method to get the prefix byte to emit, 683 // which can then be folded into a byte stream. 684 int8_t get_prefixq(Address adr); 685 int8_t get_prefixq(Address adr, Register reg); 686 687 void prefixq(Address adr); 688 void prefixq(Address adr, Register reg); 689 void prefixq(Address adr, XMMRegister reg); 690 691 int prefixq_and_encode(int reg_enc); 692 int prefixq_and_encode(int dst_enc, int src_enc); 693 694 void rex_prefix(Address adr, XMMRegister xreg, 695 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 696 int rex_prefix_and_encode(int dst_enc, int src_enc, 697 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 698 699 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc); 700 701 void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, 702 int nds_enc, VexSimdPrefix pre, VexOpcode opc); 703 704 void vex_prefix(Address adr, int nds_enc, int xreg_enc, 705 VexSimdPrefix pre, VexOpcode opc, 706 InstructionAttr *attributes); 707 708 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, 709 VexSimdPrefix pre, VexOpcode opc, 710 InstructionAttr *attributes); 711 712 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, 713 VexOpcode opc, InstructionAttr *attributes); 714 715 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, 716 VexOpcode opc, InstructionAttr *attributes); 717 718 // Helper functions for groups of instructions 719 void emit_arith_b(int op1, int op2, Register dst, int imm8); 720 721 void emit_arith(int op1, int op2, Register dst, int32_t imm32); 722 // Force generation of a 4 byte immediate value even if it fits into 8bit 723 void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32); 724 void emit_arith(int op1, int op2, Register dst, Register src); 725 726 bool emit_compressed_disp_byte(int 
&disp); 727 728 void emit_modrm(int mod, int dst_enc, int src_enc); 729 void emit_modrm_disp8(int mod, int dst_enc, int src_enc, 730 int disp); 731 void emit_modrm_sib(int mod, int dst_enc, int src_enc, 732 Address::ScaleFactor scale, int index_enc, int base_enc); 733 void emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc, 734 Address::ScaleFactor scale, int index_enc, int base_enc, 735 int disp); 736 737 void emit_operand_helper(int reg_enc, 738 int base_enc, int index_enc, Address::ScaleFactor scale, 739 int disp, 740 RelocationHolder const& rspec, 741 int rip_relative_correction = 0); 742 743 void emit_operand(Register reg, 744 Register base, Register index, Address::ScaleFactor scale, 745 int disp, 746 RelocationHolder const& rspec, 747 int rip_relative_correction = 0); 748 749 void emit_operand(Register reg, 750 Register base, XMMRegister index, Address::ScaleFactor scale, 751 int disp, 752 RelocationHolder const& rspec); 753 754 void emit_operand(XMMRegister xreg, 755 Register base, XMMRegister xindex, Address::ScaleFactor scale, 756 int disp, 757 RelocationHolder const& rspec); 758 759 void emit_operand(Register reg, Address adr, 760 int rip_relative_correction = 0); 761 762 void emit_operand(XMMRegister reg, 763 Register base, Register index, Address::ScaleFactor scale, 764 int disp, 765 RelocationHolder const& rspec); 766 767 void emit_operand(XMMRegister reg, Address adr); 768 769 // Immediate-to-memory forms 770 void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32); 771 772 protected: 773 #ifdef ASSERT 774 void check_relocation(RelocationHolder const& rspec, int format); 775 #endif 776 777 void emit_data(jint data, relocInfo::relocType rtype, int format); 778 void emit_data(jint data, RelocationHolder const& rspec, int format); 779 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); 780 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); 781 782 bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); 783 784 // These are all easily abused and hence protected 785 786 // 32BIT ONLY SECTION 787 #ifndef _LP64 788 // Make these disappear in 64bit mode since they would never be correct 789 void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 790 void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 791 792 void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 793 void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 794 795 void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 796 #else 797 // 64BIT ONLY SECTION 798 void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY 799 800 void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec); 801 void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec); 802 803 void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec); 804 void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec); 805 #endif // _LP64 806 807 // These are unique in that we are ensured by the caller that the 32bit 808 // relative in these instructions will always be able to reach the potentially 809 // 64bit address described by entry. Since they can take a 64bit address they 810 // don't have the 32 suffix like the other instructions in this class. 
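  //
  // Illustrative use only ('stub_entry' is a placeholder for such an entry point;
  // the relocation spec depends on the call site):
  //
  //   call_literal(stub_entry, runtime_call_Relocation::spec());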
  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);

  // "Avoid using directly" section
  // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the following inc() and dec() methods directly. INC & DEC instructions
  // could cause a partial flag stall since they don't set the CF flag.
  // Use the MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.

  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);

  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);

  // New cpus require use of movsd and movss to avoid partial register stall
  // when loading from memory. But for old Opteron use movlpd instead of movsd.
  // The selection is done in MacroAssembler::movdbl() and movflt().

  // Move Scalar Single-Precision Floating-Point Values
  void movss(XMMRegister dst, Address src);
  void movss(XMMRegister dst, XMMRegister src);
  void movss(Address dst, XMMRegister src);

  // Move Scalar Double-Precision Floating-Point Values
  void movsd(XMMRegister dst, Address src);
  void movsd(XMMRegister dst, XMMRegister src);
  void movsd(Address dst, XMMRegister src);
  void movlpd(XMMRegister dst, Address src);

  // New cpus require use of movaps and movapd to avoid partial register stall
  // when moving between registers.
  void movaps(XMMRegister dst, XMMRegister src);
  void movapd(XMMRegister dst, XMMRegister src);

  // End avoid using directly

  // Instruction prefixes
  void prefix(Prefix p);

 public:

  // Creation
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    init_attributes();
  }

  // Decoding
  static address locate_operand(address inst, WhichOperand which);
  static address locate_next_instruction(address inst);

  // Utilities
  static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                         int cur_tuple_type, int in_size_in_bits, int cur_encoding);

  // Generic instructions
  // Does 32bit or 64bit as needed for the platform.
In some sense these 880 // belong in macro assembler but there is no need for both varieties to exist 881 882 void init_attributes(void) { 883 _legacy_mode_bw = (VM_Version::supports_avx512bw() == false); 884 _legacy_mode_dq = (VM_Version::supports_avx512dq() == false); 885 _legacy_mode_vl = (VM_Version::supports_avx512vl() == false); 886 _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false); 887 NOT_LP64(_is_managed = false;) 888 _attributes = NULL; 889 } 890 891 void set_attributes(InstructionAttr *attributes) { _attributes = attributes; } 892 void clear_attributes(void) { _attributes = NULL; } 893 894 void set_managed(void) { NOT_LP64(_is_managed = true;) } 895 void clear_managed(void) { NOT_LP64(_is_managed = false;) } 896 bool is_managed(void) { 897 NOT_LP64(return _is_managed;) 898 LP64_ONLY(return false;) } 899 900 void lea(Register dst, Address src); 901 902 void mov(Register dst, Register src); 903 904 #ifdef _LP64 905 // support caching the result of some routines 906 907 // must be called before pusha(), popa(), vzeroupper() - checked with asserts 908 static void precompute_instructions(); 909 910 void pusha_uncached(); 911 void popa_uncached(); 912 #endif 913 void vzeroupper_uncached(); 914 915 void pusha(); 916 void popa(); 917 918 void pushf(); 919 void popf(); 920 921 void push(int32_t imm32); 922 923 void push(Register src); 924 925 void pop(Register dst); 926 927 // These are dummies to prevent surprise implicit conversions to Register 928 void push(void* v); 929 void pop(void* v); 930 931 // These do register sized moves/scans 932 void rep_mov(); 933 void rep_stos(); 934 void rep_stosb(); 935 void repne_scan(); 936 #ifdef _LP64 937 void repne_scanl(); 938 #endif 939 940 // Vanilla instructions in lexical order 941 942 void adcl(Address dst, int32_t imm32); 943 void adcl(Address dst, Register src); 944 void adcl(Register dst, int32_t imm32); 945 void adcl(Register dst, Address src); 946 void adcl(Register dst, Register src); 947 948 void adcq(Register dst, int32_t imm32); 949 void adcq(Register dst, Address src); 950 void adcq(Register dst, Register src); 951 952 void addb(Address dst, int imm8); 953 void addw(Address dst, int imm16); 954 955 void addl(Address dst, int32_t imm32); 956 void addl(Address dst, Register src); 957 void addl(Register dst, int32_t imm32); 958 void addl(Register dst, Address src); 959 void addl(Register dst, Register src); 960 961 void addq(Address dst, int32_t imm32); 962 void addq(Address dst, Register src); 963 void addq(Register dst, int32_t imm32); 964 void addq(Register dst, Address src); 965 void addq(Register dst, Register src); 966 967 #ifdef _LP64 968 //Add Unsigned Integers with Carry Flag 969 void adcxq(Register dst, Register src); 970 971 //Add Unsigned Integers with Overflow Flag 972 void adoxq(Register dst, Register src); 973 #endif 974 975 void addr_nop_4(); 976 void addr_nop_5(); 977 void addr_nop_7(); 978 void addr_nop_8(); 979 980 // Add Scalar Double-Precision Floating-Point Values 981 void addsd(XMMRegister dst, Address src); 982 void addsd(XMMRegister dst, XMMRegister src); 983 984 // Add Scalar Single-Precision Floating-Point Values 985 void addss(XMMRegister dst, Address src); 986 void addss(XMMRegister dst, XMMRegister src); 987 988 // AES instructions 989 void aesdec(XMMRegister dst, Address src); 990 void aesdec(XMMRegister dst, XMMRegister src); 991 void aesdeclast(XMMRegister dst, Address src); 992 void aesdeclast(XMMRegister dst, XMMRegister src); 993 void aesenc(XMMRegister dst, Address src); 994 void 
aesenc(XMMRegister dst, XMMRegister src); 995 void aesenclast(XMMRegister dst, Address src); 996 void aesenclast(XMMRegister dst, XMMRegister src); 997 // Vector AES instructions 998 void vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 999 void vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1000 void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1001 void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1002 1003 void andl(Address dst, int32_t imm32); 1004 void andl(Register dst, int32_t imm32); 1005 void andl(Register dst, Address src); 1006 void andl(Register dst, Register src); 1007 1008 void andq(Address dst, int32_t imm32); 1009 void andq(Register dst, int32_t imm32); 1010 void andq(Register dst, Address src); 1011 void andq(Register dst, Register src); 1012 1013 // BMI instructions 1014 void andnl(Register dst, Register src1, Register src2); 1015 void andnl(Register dst, Register src1, Address src2); 1016 void andnq(Register dst, Register src1, Register src2); 1017 void andnq(Register dst, Register src1, Address src2); 1018 1019 void blsil(Register dst, Register src); 1020 void blsil(Register dst, Address src); 1021 void blsiq(Register dst, Register src); 1022 void blsiq(Register dst, Address src); 1023 1024 void blsmskl(Register dst, Register src); 1025 void blsmskl(Register dst, Address src); 1026 void blsmskq(Register dst, Register src); 1027 void blsmskq(Register dst, Address src); 1028 1029 void blsrl(Register dst, Register src); 1030 void blsrl(Register dst, Address src); 1031 void blsrq(Register dst, Register src); 1032 void blsrq(Register dst, Address src); 1033 1034 void bsfl(Register dst, Register src); 1035 void bsrl(Register dst, Register src); 1036 1037 #ifdef _LP64 1038 void bsfq(Register dst, Register src); 1039 void bsrq(Register dst, Register src); 1040 #endif 1041 1042 void bswapl(Register reg); 1043 1044 void bswapq(Register reg); 1045 1046 void call(Label& L, relocInfo::relocType rtype); 1047 void call(Register reg); // push pc; pc <- reg 1048 void call(Address adr); // push pc; pc <- adr 1049 1050 void cdql(); 1051 1052 void cdqq(); 1053 1054 void cld(); 1055 1056 void clflush(Address adr); 1057 void clflushopt(Address adr); 1058 void clwb(Address adr); 1059 1060 void cmovl(Condition cc, Register dst, Register src); 1061 void cmovl(Condition cc, Register dst, Address src); 1062 1063 void cmovq(Condition cc, Register dst, Register src); 1064 void cmovq(Condition cc, Register dst, Address src); 1065 1066 1067 void cmpb(Address dst, int imm8); 1068 1069 void cmpl(Address dst, int32_t imm32); 1070 1071 void cmpl(Register dst, int32_t imm32); 1072 void cmpl(Register dst, Register src); 1073 void cmpl(Register dst, Address src); 1074 1075 void cmpq(Address dst, int32_t imm32); 1076 void cmpq(Address dst, Register src); 1077 1078 void cmpq(Register dst, int32_t imm32); 1079 void cmpq(Register dst, Register src); 1080 void cmpq(Register dst, Address src); 1081 1082 // these are dummies used to catch attempting to convert NULL to Register 1083 void cmpl(Register dst, void* junk); // dummy 1084 void cmpq(Register dst, void* junk); // dummy 1085 1086 void cmpw(Address dst, int imm16); 1087 1088 void cmpxchg8 (Address adr); 1089 1090 void cmpxchgb(Register reg, Address adr); 1091 void cmpxchgl(Register reg, Address adr); 1092 1093 void cmpxchgq(Register reg, Address adr); 1094 1095 // Ordered Compare Scalar Double-Precision Floating-Point Values and set 
EFLAGS 1096 void comisd(XMMRegister dst, Address src); 1097 void comisd(XMMRegister dst, XMMRegister src); 1098 1099 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS 1100 void comiss(XMMRegister dst, Address src); 1101 void comiss(XMMRegister dst, XMMRegister src); 1102 1103 // Identify processor type and features 1104 void cpuid(); 1105 1106 // CRC32C 1107 void crc32(Register crc, Register v, int8_t sizeInBytes); 1108 void crc32(Register crc, Address adr, int8_t sizeInBytes); 1109 1110 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value 1111 void cvtsd2ss(XMMRegister dst, XMMRegister src); 1112 void cvtsd2ss(XMMRegister dst, Address src); 1113 1114 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value 1115 void cvtsi2sdl(XMMRegister dst, Register src); 1116 void cvtsi2sdl(XMMRegister dst, Address src); 1117 void cvtsi2sdq(XMMRegister dst, Register src); 1118 void cvtsi2sdq(XMMRegister dst, Address src); 1119 1120 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value 1121 void cvtsi2ssl(XMMRegister dst, Register src); 1122 void cvtsi2ssl(XMMRegister dst, Address src); 1123 void cvtsi2ssq(XMMRegister dst, Register src); 1124 void cvtsi2ssq(XMMRegister dst, Address src); 1125 1126 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value 1127 void cvtdq2pd(XMMRegister dst, XMMRegister src); 1128 1129 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value 1130 void cvtdq2ps(XMMRegister dst, XMMRegister src); 1131 1132 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value 1133 void cvtss2sd(XMMRegister dst, XMMRegister src); 1134 void cvtss2sd(XMMRegister dst, Address src); 1135 1136 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer 1137 void cvttsd2sil(Register dst, Address src); 1138 void cvttsd2sil(Register dst, XMMRegister src); 1139 void cvttsd2siq(Register dst, Address src); 1140 void cvttsd2siq(Register dst, XMMRegister src); 1141 1142 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer 1143 void cvttss2sil(Register dst, XMMRegister src); 1144 void cvttss2siq(Register dst, XMMRegister src); 1145 1146 void cvttpd2dq(XMMRegister dst, XMMRegister src); 1147 1148 //Abs of packed Integer values 1149 void pabsb(XMMRegister dst, XMMRegister src); 1150 void pabsw(XMMRegister dst, XMMRegister src); 1151 void pabsd(XMMRegister dst, XMMRegister src); 1152 void vpabsb(XMMRegister dst, XMMRegister src, int vector_len); 1153 void vpabsw(XMMRegister dst, XMMRegister src, int vector_len); 1154 void vpabsd(XMMRegister dst, XMMRegister src, int vector_len); 1155 void evpabsq(XMMRegister dst, XMMRegister src, int vector_len); 1156 1157 // Divide Scalar Double-Precision Floating-Point Values 1158 void divsd(XMMRegister dst, Address src); 1159 void divsd(XMMRegister dst, XMMRegister src); 1160 1161 // Divide Scalar Single-Precision Floating-Point Values 1162 void divss(XMMRegister dst, Address src); 1163 void divss(XMMRegister dst, XMMRegister src); 1164 1165 1166 #ifndef _LP64 1167 private: 1168 // operands that only take the original 32bit registers 1169 void emit_operand32(Register reg, Address adr); 1170 1171 void emit_farith(int b1, int b2, int i); 1172 1173 public: 1174 void emms(); 1175 1176 void fabs(); 1177 1178 void fadd(int i); 1179 1180 void fadd_d(Address src); 
1181 void fadd_s(Address src); 1182 1183 // "Alternate" versions of x87 instructions place result down in FPU 1184 // stack instead of on TOS 1185 1186 void fadda(int i); // "alternate" fadd 1187 void faddp(int i = 1); 1188 1189 void fchs(); 1190 1191 void fcom(int i); 1192 1193 void fcomp(int i = 1); 1194 void fcomp_d(Address src); 1195 void fcomp_s(Address src); 1196 1197 void fcompp(); 1198 1199 void fcos(); 1200 1201 void fdecstp(); 1202 1203 void fdiv(int i); 1204 void fdiv_d(Address src); 1205 void fdivr_s(Address src); 1206 void fdiva(int i); // "alternate" fdiv 1207 void fdivp(int i = 1); 1208 1209 void fdivr(int i); 1210 void fdivr_d(Address src); 1211 void fdiv_s(Address src); 1212 1213 void fdivra(int i); // "alternate" reversed fdiv 1214 1215 void fdivrp(int i = 1); 1216 1217 void ffree(int i = 0); 1218 1219 void fild_d(Address adr); 1220 void fild_s(Address adr); 1221 1222 void fincstp(); 1223 1224 void finit(); 1225 1226 void fist_s (Address adr); 1227 void fistp_d(Address adr); 1228 void fistp_s(Address adr); 1229 1230 void fld1(); 1231 1232 void fld_d(Address adr); 1233 void fld_s(Address adr); 1234 void fld_s(int index); 1235 void fld_x(Address adr); // extended-precision (80-bit) format 1236 1237 void fldcw(Address src); 1238 1239 void fldenv(Address src); 1240 1241 void fldlg2(); 1242 1243 void fldln2(); 1244 1245 void fldz(); 1246 1247 void flog(); 1248 void flog10(); 1249 1250 void fmul(int i); 1251 1252 void fmul_d(Address src); 1253 void fmul_s(Address src); 1254 1255 void fmula(int i); // "alternate" fmul 1256 1257 void fmulp(int i = 1); 1258 1259 void fnsave(Address dst); 1260 1261 void fnstcw(Address src); 1262 1263 void fnstsw_ax(); 1264 1265 void fprem(); 1266 void fprem1(); 1267 1268 void frstor(Address src); 1269 1270 void fsin(); 1271 1272 void fsqrt(); 1273 1274 void fst_d(Address adr); 1275 void fst_s(Address adr); 1276 1277 void fstp_d(Address adr); 1278 void fstp_d(int index); 1279 void fstp_s(Address adr); 1280 void fstp_x(Address adr); // extended-precision (80-bit) format 1281 1282 void fsub(int i); 1283 void fsub_d(Address src); 1284 void fsub_s(Address src); 1285 1286 void fsuba(int i); // "alternate" fsub 1287 1288 void fsubp(int i = 1); 1289 1290 void fsubr(int i); 1291 void fsubr_d(Address src); 1292 void fsubr_s(Address src); 1293 1294 void fsubra(int i); // "alternate" reversed fsub 1295 1296 void fsubrp(int i = 1); 1297 1298 void ftan(); 1299 1300 void ftst(); 1301 1302 void fucomi(int i = 1); 1303 void fucomip(int i = 1); 1304 1305 void fwait(); 1306 1307 void fxch(int i = 1); 1308 1309 void fyl2x(); 1310 void frndint(); 1311 void f2xm1(); 1312 void fldl2e(); 1313 #endif // !_LP64 1314 1315 void fxrstor(Address src); 1316 void xrstor(Address src); 1317 1318 void fxsave(Address dst); 1319 void xsave(Address dst); 1320 1321 void hlt(); 1322 1323 void idivl(Register src); 1324 void divl(Register src); // Unsigned division 1325 1326 #ifdef _LP64 1327 void idivq(Register src); 1328 #endif 1329 1330 void imull(Register src); 1331 void imull(Register dst, Register src); 1332 void imull(Register dst, Register src, int value); 1333 void imull(Register dst, Address src); 1334 1335 #ifdef _LP64 1336 void imulq(Register dst, Register src); 1337 void imulq(Register dst, Register src, int value); 1338 void imulq(Register dst, Address src); 1339 #endif 1340 1341 // jcc is the generic conditional branch generator to run- 1342 // time routines, jcc is used for branches to labels. 
jcc 1343 // takes a branch opcode (cc) and a label (L) and generates 1344 // either a backward branch or a forward branch and links it 1345 // to the label fixup chain. Usage: 1346 // 1347 // Label L; // unbound label 1348 // jcc(cc, L); // forward branch to unbound label 1349 // bind(L); // bind label to the current pc 1350 // jcc(cc, L); // backward branch to bound label 1351 // bind(L); // illegal: a label may be bound only once 1352 // 1353 // Note: The same Label can be used for forward and backward branches 1354 // but it may be bound only once. 1355 1356 void jcc(Condition cc, Label& L, bool maybe_short = true); 1357 1358 // Conditional jump to a 8-bit offset to L. 1359 // WARNING: be very careful using this for forward jumps. If the label is 1360 // not bound within an 8-bit offset of this instruction, a run-time error 1361 // will occur. 1362 1363 // Use macro to record file and line number. 1364 #define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__) 1365 1366 void jccb_0(Condition cc, Label& L, const char* file, int line); 1367 1368 void jmp(Address entry); // pc <- entry 1369 1370 // Label operations & relative jumps (PPUM Appendix D) 1371 void jmp(Label& L, bool maybe_short = true); // unconditional jump to L 1372 1373 void jmp(Register entry); // pc <- entry 1374 1375 // Unconditional 8-bit offset jump to L. 1376 // WARNING: be very careful using this for forward jumps. If the label is 1377 // not bound within an 8-bit offset of this instruction, a run-time error 1378 // will occur. 1379 1380 // Use macro to record file and line number. 1381 #define jmpb(L) jmpb_0(L, __FILE__, __LINE__) 1382 1383 void jmpb_0(Label& L, const char* file, int line); 1384 1385 void ldmxcsr( Address src ); 1386 1387 void leal(Register dst, Address src); 1388 1389 void leaq(Register dst, Address src); 1390 1391 void lfence(); 1392 1393 void lock(); 1394 1395 void lzcntl(Register dst, Register src); 1396 1397 #ifdef _LP64 1398 void lzcntq(Register dst, Register src); 1399 #endif 1400 1401 enum Membar_mask_bits { 1402 StoreStore = 1 << 3, 1403 LoadStore = 1 << 2, 1404 StoreLoad = 1 << 1, 1405 LoadLoad = 1 << 0 1406 }; 1407 1408 // Serializes memory and blows flags 1409 void membar(Membar_mask_bits order_constraint) { 1410 // We only have to handle StoreLoad 1411 if (order_constraint & StoreLoad) { 1412 // All usable chips support "locked" instructions which suffice 1413 // as barriers, and are much faster than the alternative of 1414 // using cpuid instruction. We use here a locked add [esp-C],0. 1415 // This is conveniently otherwise a no-op except for blowing 1416 // flags, and introducing a false dependency on target memory 1417 // location. We can't do anything with flags, but we can avoid 1418 // memory dependencies in the current method by locked-adding 1419 // somewhere else on the stack. Doing [esp+C] will collide with 1420 // something on stack in current method, hence we go for [esp-C]. 1421 // It is convenient since it is almost always in data cache, for 1422 // any small C. We need to step back from SP to avoid data 1423 // dependencies with other things on below SP (callee-saves, for 1424 // example). Without a clear way to figure out the minimal safe 1425 // distance from SP, it makes sense to step back the complete 1426 // cache line, as this will also avoid possible second-order effects 1427 // with locked ops against the cache line. Our choice of offset 1428 // is bounded by x86 operand encoding, which should stay within 1429 // [-128; +127] to have the 8-byte displacement encoding. 
1430 // 1431 // Any change to this code may need to revisit other places in 1432 // the code where this idiom is used, in particular the 1433 // orderAccess code. 1434 1435 int offset = -VM_Version::L1_line_size(); 1436 if (offset < -128) { 1437 offset = -128; 1438 } 1439 1440 lock(); 1441 addl(Address(rsp, offset), 0);// Assert the lock# signal here 1442 } 1443 } 1444 1445 void mfence(); 1446 void sfence(); 1447 1448 // Moves 1449 1450 void mov64(Register dst, int64_t imm64); 1451 1452 void movb(Address dst, Register src); 1453 void movb(Address dst, int imm8); 1454 void movb(Register dst, Address src); 1455 1456 void movddup(XMMRegister dst, XMMRegister src); 1457 1458 void kmovbl(KRegister dst, Register src); 1459 void kmovbl(Register dst, KRegister src); 1460 void kmovwl(KRegister dst, Register src); 1461 void kmovwl(KRegister dst, Address src); 1462 void kmovwl(Register dst, KRegister src); 1463 void kmovdl(KRegister dst, Register src); 1464 void kmovdl(Register dst, KRegister src); 1465 void kmovql(KRegister dst, KRegister src); 1466 void kmovql(Address dst, KRegister src); 1467 void kmovql(KRegister dst, Address src); 1468 void kmovql(KRegister dst, Register src); 1469 void kmovql(Register dst, KRegister src); 1470 1471 void knotwl(KRegister dst, KRegister src); 1472 1473 void kortestbl(KRegister dst, KRegister src); 1474 void kortestwl(KRegister dst, KRegister src); 1475 void kortestdl(KRegister dst, KRegister src); 1476 void kortestql(KRegister dst, KRegister src); 1477 1478 void ktestq(KRegister src1, KRegister src2); 1479 void ktestd(KRegister src1, KRegister src2); 1480 1481 void ktestql(KRegister dst, KRegister src); 1482 1483 void movdl(XMMRegister dst, Register src); 1484 void movdl(Register dst, XMMRegister src); 1485 void movdl(XMMRegister dst, Address src); 1486 void movdl(Address dst, XMMRegister src); 1487 1488 // Move Double Quadword 1489 void movdq(XMMRegister dst, Register src); 1490 void movdq(Register dst, XMMRegister src); 1491 1492 // Move Aligned Double Quadword 1493 void movdqa(XMMRegister dst, XMMRegister src); 1494 void movdqa(XMMRegister dst, Address src); 1495 1496 // Move Unaligned Double Quadword 1497 void movdqu(Address dst, XMMRegister src); 1498 void movdqu(XMMRegister dst, Address src); 1499 void movdqu(XMMRegister dst, XMMRegister src); 1500 1501 // Move Unaligned 256bit Vector 1502 void vmovdqu(Address dst, XMMRegister src); 1503 void vmovdqu(XMMRegister dst, Address src); 1504 void vmovdqu(XMMRegister dst, XMMRegister src); 1505 1506 // Move Unaligned 512bit Vector 1507 void evmovdqub(Address dst, XMMRegister src, int vector_len); 1508 void evmovdqub(XMMRegister dst, Address src, int vector_len); 1509 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len); 1510 void evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len); 1511 void evmovdquw(Address dst, XMMRegister src, int vector_len); 1512 void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len); 1513 void evmovdquw(XMMRegister dst, Address src, int vector_len); 1514 void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len); 1515 void evmovdqul(Address dst, XMMRegister src, int vector_len); 1516 void evmovdqul(XMMRegister dst, Address src, int vector_len); 1517 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len); 1518 void evmovdquq(Address dst, XMMRegister src, int vector_len); 1519 void evmovdquq(XMMRegister dst, Address src, int vector_len); 1520 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len); 1521 
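  // Illustrative use only: vector_len is one of the AvxVectorLen values above,
  // and the masked forms take an opmask register, e.g. (sketch)
  //   evmovdqul(xmm0, Address(rsi, 0), AVX_512bit);
  //   evmovdqub(xmm1, k1, Address(rdi, 0), AVX_512bit);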
1522 // Move lower 64bit to high 64bit in 128bit register 1523 void movlhps(XMMRegister dst, XMMRegister src); 1524 1525 void movl(Register dst, int32_t imm32); 1526 void movl(Address dst, int32_t imm32); 1527 void movl(Register dst, Register src); 1528 void movl(Register dst, Address src); 1529 void movl(Address dst, Register src); 1530 1531 // These dummies prevent using movl from converting a zero (like NULL) into Register 1532 // by giving the compiler two choices it can't resolve 1533 1534 void movl(Address dst, void* junk); 1535 void movl(Register dst, void* junk); 1536 1537 #ifdef _LP64 1538 void movq(Register dst, Register src); 1539 void movq(Register dst, Address src); 1540 void movq(Address dst, Register src); 1541 1542 // These dummies prevent using movq from converting a zero (like NULL) into Register 1543 // by giving the compiler two choices it can't resolve 1544 1545 void movq(Address dst, void* dummy); 1546 void movq(Register dst, void* dummy); 1547 #endif 1548 1549 // Move Quadword 1550 void movq(Address dst, XMMRegister src); 1551 void movq(XMMRegister dst, Address src); 1552 1553 void movsbl(Register dst, Address src); 1554 void movsbl(Register dst, Register src); 1555 1556 #ifdef _LP64 1557 void movsbq(Register dst, Address src); 1558 void movsbq(Register dst, Register src); 1559 1560 // Move signed 32bit immediate to 64bit extending sign 1561 void movslq(Address dst, int32_t imm64); 1562 void movslq(Register dst, int32_t imm64); 1563 1564 void movslq(Register dst, Address src); 1565 void movslq(Register dst, Register src); 1566 void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous 1567 #endif 1568 1569 void movswl(Register dst, Address src); 1570 void movswl(Register dst, Register src); 1571 1572 #ifdef _LP64 1573 void movswq(Register dst, Address src); 1574 void movswq(Register dst, Register src); 1575 #endif 1576 1577 void movw(Address dst, int imm16); 1578 void movw(Register dst, Address src); 1579 void movw(Address dst, Register src); 1580 1581 void movzbl(Register dst, Address src); 1582 void movzbl(Register dst, Register src); 1583 1584 #ifdef _LP64 1585 void movzbq(Register dst, Address src); 1586 void movzbq(Register dst, Register src); 1587 #endif 1588 1589 void movzwl(Register dst, Address src); 1590 void movzwl(Register dst, Register src); 1591 1592 #ifdef _LP64 1593 void movzwq(Register dst, Address src); 1594 void movzwq(Register dst, Register src); 1595 #endif 1596 1597 // Unsigned multiply with RAX destination register 1598 void mull(Address src); 1599 void mull(Register src); 1600 1601 #ifdef _LP64 1602 void mulq(Address src); 1603 void mulq(Register src); 1604 void mulxq(Register dst1, Register dst2, Register src); 1605 #endif 1606 1607 // Multiply Scalar Double-Precision Floating-Point Values 1608 void mulsd(XMMRegister dst, Address src); 1609 void mulsd(XMMRegister dst, XMMRegister src); 1610 1611 // Multiply Scalar Single-Precision Floating-Point Values 1612 void mulss(XMMRegister dst, Address src); 1613 void mulss(XMMRegister dst, XMMRegister src); 1614 1615 void negl(Register dst); 1616 1617 #ifdef _LP64 1618 void negq(Register dst); 1619 #endif 1620 1621 void nop(int i = 1); 1622 1623 void notl(Register dst); 1624 1625 #ifdef _LP64 1626 void notq(Register dst); 1627 1628 void btsq(Address dst, int imm8); 1629 void btrq(Address dst, int imm8); 1630 #endif 1631 1632 void orl(Address dst, int32_t imm32); 1633 void orl(Register dst, int32_t imm32); 1634 void orl(Register dst, Address src); 1635 void orl(Register 
dst, Register src); 1636 void orl(Address dst, Register src); 1637 1638 void orb(Address dst, int imm8); 1639 1640 void orq(Address dst, int32_t imm32); 1641 void orq(Register dst, int32_t imm32); 1642 void orq(Register dst, Address src); 1643 void orq(Register dst, Register src); 1644 1645 // Pack with unsigned saturation 1646 void packuswb(XMMRegister dst, XMMRegister src); 1647 void packuswb(XMMRegister dst, Address src); 1648 void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1649 1650 // Pemutation of 64bit words 1651 void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len); 1652 void vpermq(XMMRegister dst, XMMRegister src, int imm8); 1653 void vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1654 void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8); 1655 void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8); 1656 void evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1657 1658 void pause(); 1659 1660 // Undefined Instruction 1661 void ud2(); 1662 1663 // SSE4.2 string instructions 1664 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); 1665 void pcmpestri(XMMRegister xmm1, Address src, int imm8); 1666 1667 void pcmpeqb(XMMRegister dst, XMMRegister src); 1668 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1669 void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len); 1670 void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len); 1671 void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len); 1672 1673 void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len); 1674 void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len); 1675 1676 void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len); 1677 void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate of, int vector_len); 1678 void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len); 1679 1680 void pcmpeqw(XMMRegister dst, XMMRegister src); 1681 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1682 void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len); 1683 void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len); 1684 1685 void pcmpeqd(XMMRegister dst, XMMRegister src); 1686 void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1687 void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len); 1688 void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len); 1689 1690 void pcmpeqq(XMMRegister dst, XMMRegister src); 1691 void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1692 void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len); 1693 void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len); 1694 1695 void pmovmskb(Register dst, XMMRegister src); 1696 void vpmovmskb(Register dst, XMMRegister src); 1697 1698 // SSE 4.1 extract 1699 void pextrd(Register dst, XMMRegister src, int imm8); 1700 void pextrq(Register dst, XMMRegister src, int imm8); 1701 void pextrd(Address dst, XMMRegister src, int imm8); 1702 void pextrq(Address dst, XMMRegister src, int imm8); 1703 
void pextrb(Address dst, XMMRegister src, int imm8); 1704 // SSE 2 extract 1705 void pextrw(Register dst, XMMRegister src, int imm8); 1706 void pextrw(Address dst, XMMRegister src, int imm8); 1707 1708 // SSE 4.1 insert 1709 void pinsrd(XMMRegister dst, Register src, int imm8); 1710 void pinsrq(XMMRegister dst, Register src, int imm8); 1711 void pinsrd(XMMRegister dst, Address src, int imm8); 1712 void pinsrq(XMMRegister dst, Address src, int imm8); 1713 void pinsrb(XMMRegister dst, Address src, int imm8); 1714 // SSE 2 insert 1715 void pinsrw(XMMRegister dst, Register src, int imm8); 1716 void pinsrw(XMMRegister dst, Address src, int imm8); 1717 1718 // SSE4.1 packed move 1719 void pmovzxbw(XMMRegister dst, XMMRegister src); 1720 void pmovzxbw(XMMRegister dst, Address src); 1721 1722 void vpmovzxbw( XMMRegister dst, Address src, int vector_len); 1723 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len); 1724 void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len); 1725 1726 void evpmovwb(Address dst, XMMRegister src, int vector_len); 1727 void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len); 1728 1729 void vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len); 1730 1731 void evpmovdb(Address dst, XMMRegister src, int vector_len); 1732 1733 // Sign extend moves 1734 void pmovsxbw(XMMRegister dst, XMMRegister src); 1735 void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len); 1736 1737 // Multiply add 1738 void pmaddwd(XMMRegister dst, XMMRegister src); 1739 void vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1740 // Multiply add accumulate 1741 void evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1742 1743 #ifndef _LP64 // no 32bit push/pop on amd64 1744 void popl(Address dst); 1745 #endif 1746 1747 #ifdef _LP64 1748 void popq(Address dst); 1749 #endif 1750 1751 void popcntl(Register dst, Address src); 1752 void popcntl(Register dst, Register src); 1753 1754 void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len); 1755 1756 #ifdef _LP64 1757 void popcntq(Register dst, Address src); 1758 void popcntq(Register dst, Register src); 1759 #endif 1760 1761 // Prefetches (SSE, SSE2, 3DNOW only) 1762 1763 void prefetchnta(Address src); 1764 void prefetchr(Address src); 1765 void prefetcht0(Address src); 1766 void prefetcht1(Address src); 1767 void prefetcht2(Address src); 1768 void prefetchw(Address src); 1769 1770 // Shuffle Bytes 1771 void pshufb(XMMRegister dst, XMMRegister src); 1772 void pshufb(XMMRegister dst, Address src); 1773 void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1774 1775 // Shuffle Packed Doublewords 1776 void pshufd(XMMRegister dst, XMMRegister src, int mode); 1777 void pshufd(XMMRegister dst, Address src, int mode); 1778 void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len); 1779 1780 // Shuffle Packed Low Words 1781 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1782 void pshuflw(XMMRegister dst, Address src, int mode); 1783 1784 // Shuffle packed values at 128 bit granularity 1785 void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len); 1786 1787 // Shift Right by bytes Logical DoubleQuadword Immediate 1788 void psrldq(XMMRegister dst, int shift); 1789 // Shift Left by bytes Logical DoubleQuadword Immediate 1790 void pslldq(XMMRegister dst, int shift); 1791 1792 // Logical Compare 128bit 1793 void ptest(XMMRegister dst, XMMRegister src); 
1794 void ptest(XMMRegister dst, Address src); 1795 // Logical Compare 256bit 1796 void vptest(XMMRegister dst, XMMRegister src); 1797 void vptest(XMMRegister dst, Address src); 1798 1799 // Interleave Low Bytes 1800 void punpcklbw(XMMRegister dst, XMMRegister src); 1801 void punpcklbw(XMMRegister dst, Address src); 1802 1803 // Interleave Low Doublewords 1804 void punpckldq(XMMRegister dst, XMMRegister src); 1805 void punpckldq(XMMRegister dst, Address src); 1806 1807 // Interleave Low Quadwords 1808 void punpcklqdq(XMMRegister dst, XMMRegister src); 1809 1810 #ifndef _LP64 // no 32bit push/pop on amd64 1811 void pushl(Address src); 1812 #endif 1813 1814 void pushq(Address src); 1815 1816 void rcll(Register dst, int imm8); 1817 1818 void rclq(Register dst, int imm8); 1819 1820 void rcrq(Register dst, int imm8); 1821 1822 void rcpps(XMMRegister dst, XMMRegister src); 1823 1824 void rcpss(XMMRegister dst, XMMRegister src); 1825 1826 void rdtsc(); 1827 1828 void ret(int imm16); 1829 1830 #ifdef _LP64 1831 void rorq(Register dst, int imm8); 1832 void rorxq(Register dst, Register src, int imm8); 1833 void rorxd(Register dst, Register src, int imm8); 1834 #endif 1835 1836 void sahf(); 1837 1838 void sarl(Register dst, int imm8); 1839 void sarl(Register dst); 1840 1841 void sarq(Register dst, int imm8); 1842 void sarq(Register dst); 1843 1844 void sbbl(Address dst, int32_t imm32); 1845 void sbbl(Register dst, int32_t imm32); 1846 void sbbl(Register dst, Address src); 1847 void sbbl(Register dst, Register src); 1848 1849 void sbbq(Address dst, int32_t imm32); 1850 void sbbq(Register dst, int32_t imm32); 1851 void sbbq(Register dst, Address src); 1852 void sbbq(Register dst, Register src); 1853 1854 void setb(Condition cc, Register dst); 1855 1856 void palignr(XMMRegister dst, XMMRegister src, int imm8); 1857 void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len); 1858 void evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 1859 1860 void pblendw(XMMRegister dst, XMMRegister src, int imm8); 1861 1862 void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8); 1863 void sha1nexte(XMMRegister dst, XMMRegister src); 1864 void sha1msg1(XMMRegister dst, XMMRegister src); 1865 void sha1msg2(XMMRegister dst, XMMRegister src); 1866 // xmm0 is implicit additional source to the following instruction. 1867 void sha256rnds2(XMMRegister dst, XMMRegister src); 1868 void sha256msg1(XMMRegister dst, XMMRegister src); 1869 void sha256msg2(XMMRegister dst, XMMRegister src); 1870 1871 void shldl(Register dst, Register src); 1872 void shldl(Register dst, Register src, int8_t imm8); 1873 void shrdl(Register dst, Register src); 1874 void shrdl(Register dst, Register src, int8_t imm8); 1875 1876 void shll(Register dst, int imm8); 1877 void shll(Register dst); 1878 1879 void shlq(Register dst, int imm8); 1880 void shlq(Register dst); 1881 1882 void shrl(Register dst, int imm8); 1883 void shrl(Register dst); 1884 1885 void shrq(Register dst, int imm8); 1886 void shrq(Register dst); 1887 1888 void smovl(); // QQQ generic? 
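  // Editor's note on the SHA-256 instructions above (a sketch, not part of this
  // interface; state0, state1 and msgtmp are assumed register names): because
  // sha256rnds2 reads its w+k values implicitly from xmm0, callers load xmm0
  // before each pair of rounds, for example:
  //
  //   movdqa(xmm0, msgtmp);          // low quadword of xmm0 = w+k for two rounds
  //   sha256rnds2(state1, state0);   // rounds 0 and 1
  //   pshufd(xmm0, msgtmp, 0x0E);    // bring the next w+k pair into the low quadword
  //   sha256rnds2(state0, state1);   // rounds 2 and 3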
1889 1890 // Compute Square Root of Scalar Double-Precision Floating-Point Value 1891 void sqrtsd(XMMRegister dst, Address src); 1892 void sqrtsd(XMMRegister dst, XMMRegister src); 1893 1894 void roundsd(XMMRegister dst, Address src, int32_t rmode); 1895 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode); 1896 1897 // Compute Square Root of Scalar Single-Precision Floating-Point Value 1898 void sqrtss(XMMRegister dst, Address src); 1899 void sqrtss(XMMRegister dst, XMMRegister src); 1900 1901 void std(); 1902 1903 void stmxcsr( Address dst ); 1904 1905 void subl(Address dst, int32_t imm32); 1906 void subl(Address dst, Register src); 1907 void subl(Register dst, int32_t imm32); 1908 void subl(Register dst, Address src); 1909 void subl(Register dst, Register src); 1910 1911 void subq(Address dst, int32_t imm32); 1912 void subq(Address dst, Register src); 1913 void subq(Register dst, int32_t imm32); 1914 void subq(Register dst, Address src); 1915 void subq(Register dst, Register src); 1916 1917 // Force generation of a 4 byte immediate value even if it fits into 8bit 1918 void subl_imm32(Register dst, int32_t imm32); 1919 void subq_imm32(Register dst, int32_t imm32); 1920 1921 // Subtract Scalar Double-Precision Floating-Point Values 1922 void subsd(XMMRegister dst, Address src); 1923 void subsd(XMMRegister dst, XMMRegister src); 1924 1925 // Subtract Scalar Single-Precision Floating-Point Values 1926 void subss(XMMRegister dst, Address src); 1927 void subss(XMMRegister dst, XMMRegister src); 1928 1929 void testb(Register dst, int imm8); 1930 void testb(Address dst, int imm8); 1931 1932 void testl(Register dst, int32_t imm32); 1933 void testl(Register dst, Register src); 1934 void testl(Register dst, Address src); 1935 1936 void testq(Register dst, int32_t imm32); 1937 void testq(Register dst, Register src); 1938 void testq(Register dst, Address src); 1939 1940 // BMI - count trailing zeros 1941 void tzcntl(Register dst, Register src); 1942 void tzcntq(Register dst, Register src); 1943 1944 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS 1945 void ucomisd(XMMRegister dst, Address src); 1946 void ucomisd(XMMRegister dst, XMMRegister src); 1947 1948 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS 1949 void ucomiss(XMMRegister dst, Address src); 1950 void ucomiss(XMMRegister dst, XMMRegister src); 1951 1952 void xabort(int8_t imm8); 1953 1954 void xaddb(Address dst, Register src); 1955 void xaddw(Address dst, Register src); 1956 void xaddl(Address dst, Register src); 1957 void xaddq(Address dst, Register src); 1958 1959 void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none); 1960 1961 void xchgb(Register reg, Address adr); 1962 void xchgw(Register reg, Address adr); 1963 void xchgl(Register reg, Address adr); 1964 void xchgl(Register dst, Register src); 1965 1966 void xchgq(Register reg, Address adr); 1967 void xchgq(Register dst, Register src); 1968 1969 void xend(); 1970 1971 // Get Value of Extended Control Register 1972 void xgetbv(); 1973 1974 void xorl(Register dst, int32_t imm32); 1975 void xorl(Register dst, Address src); 1976 void xorl(Register dst, Register src); 1977 1978 void xorb(Register dst, Address src); 1979 1980 void xorq(Register dst, Address src); 1981 void xorq(Register dst, Register src); 1982 1983 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0 1984 1985 // AVX 3-operands scalar instructions (encoded with VEX prefix) 1986 1987 void 
vaddsd(XMMRegister dst, XMMRegister nds, Address src); 1988 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1989 void vaddss(XMMRegister dst, XMMRegister nds, Address src); 1990 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1991 void vdivsd(XMMRegister dst, XMMRegister nds, Address src); 1992 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1993 void vdivss(XMMRegister dst, XMMRegister nds, Address src); 1994 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1995 void vfmadd231sd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1996 void vfmadd231ss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1997 void vmulsd(XMMRegister dst, XMMRegister nds, Address src); 1998 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1999 void vmulss(XMMRegister dst, XMMRegister nds, Address src); 2000 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src); 2001 void vsubsd(XMMRegister dst, XMMRegister nds, Address src); 2002 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 2003 void vsubss(XMMRegister dst, XMMRegister nds, Address src); 2004 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src); 2005 2006 void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src); 2007 void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 2008 void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src); 2009 void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 2010 2011 void shlxl(Register dst, Register src1, Register src2); 2012 void shlxq(Register dst, Register src1, Register src2); 2013 2014 //====================VECTOR ARITHMETIC===================================== 2015 2016 // Add Packed Floating-Point Values 2017 void addpd(XMMRegister dst, XMMRegister src); 2018 void addpd(XMMRegister dst, Address src); 2019 void addps(XMMRegister dst, XMMRegister src); 2020 void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2021 void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2022 void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2023 void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2024 2025 // Subtract Packed Floating-Point Values 2026 void subpd(XMMRegister dst, XMMRegister src); 2027 void subps(XMMRegister dst, XMMRegister src); 2028 void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2029 void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2030 void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2031 void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2032 2033 // Multiply Packed Floating-Point Values 2034 void mulpd(XMMRegister dst, XMMRegister src); 2035 void mulpd(XMMRegister dst, Address src); 2036 void mulps(XMMRegister dst, XMMRegister src); 2037 void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2038 void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2039 void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2040 void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2041 2042 void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2043 void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2044 void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2045 void vfmadd231ps(XMMRegister 
dst, XMMRegister nds, Address src, int vector_len); 2046 2047 // Divide Packed Floating-Point Values 2048 void divpd(XMMRegister dst, XMMRegister src); 2049 void divps(XMMRegister dst, XMMRegister src); 2050 void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2051 void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2052 void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2053 void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2054 2055 // Sqrt Packed Floating-Point Values 2056 void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len); 2057 void vsqrtpd(XMMRegister dst, Address src, int vector_len); 2058 void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len); 2059 void vsqrtps(XMMRegister dst, Address src, int vector_len); 2060 2061 // Round Packed Double precision value. 2062 void vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len); 2063 void vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len); 2064 void vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len); 2065 void vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len); 2066 2067 // Bitwise Logical AND of Packed Floating-Point Values 2068 void andpd(XMMRegister dst, XMMRegister src); 2069 void andps(XMMRegister dst, XMMRegister src); 2070 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2071 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2072 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2073 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2074 2075 void unpckhpd(XMMRegister dst, XMMRegister src); 2076 void unpcklpd(XMMRegister dst, XMMRegister src); 2077 2078 // Bitwise Logical XOR of Packed Floating-Point Values 2079 void xorpd(XMMRegister dst, XMMRegister src); 2080 void xorps(XMMRegister dst, XMMRegister src); 2081 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2082 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2083 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2084 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2085 2086 // Add horizontal packed integers 2087 void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2088 void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2089 void phaddw(XMMRegister dst, XMMRegister src); 2090 void phaddd(XMMRegister dst, XMMRegister src); 2091 2092 // Add packed integers 2093 void paddb(XMMRegister dst, XMMRegister src); 2094 void paddw(XMMRegister dst, XMMRegister src); 2095 void paddd(XMMRegister dst, XMMRegister src); 2096 void paddd(XMMRegister dst, Address src); 2097 void paddq(XMMRegister dst, XMMRegister src); 2098 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2099 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2100 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2101 void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2102 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2103 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2104 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2105 void 
vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2106 2107 // Sub packed integers 2108 void psubb(XMMRegister dst, XMMRegister src); 2109 void psubw(XMMRegister dst, XMMRegister src); 2110 void psubd(XMMRegister dst, XMMRegister src); 2111 void psubq(XMMRegister dst, XMMRegister src); 2112 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2113 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2114 void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2115 void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2116 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2117 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2118 void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2119 void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2120 2121 // Multiply packed integers (only shorts and ints) 2122 void pmullw(XMMRegister dst, XMMRegister src); 2123 void pmulld(XMMRegister dst, XMMRegister src); 2124 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2125 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2126 void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2127 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2128 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2129 void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2130 2131 // Shift left packed integers 2132 void psllw(XMMRegister dst, int shift); 2133 void pslld(XMMRegister dst, int shift); 2134 void psllq(XMMRegister dst, int shift); 2135 void psllw(XMMRegister dst, XMMRegister shift); 2136 void pslld(XMMRegister dst, XMMRegister shift); 2137 void psllq(XMMRegister dst, XMMRegister shift); 2138 void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2139 void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2140 void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2141 void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2142 void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2143 void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2144 void vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2145 2146 // Logical shift right packed integers 2147 void psrlw(XMMRegister dst, int shift); 2148 void psrld(XMMRegister dst, int shift); 2149 void psrlq(XMMRegister dst, int shift); 2150 void psrlw(XMMRegister dst, XMMRegister shift); 2151 void psrld(XMMRegister dst, XMMRegister shift); 2152 void psrlq(XMMRegister dst, XMMRegister shift); 2153 void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2154 void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2155 void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2156 void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2157 void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2158 void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2159 void vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2160 void evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int 
vector_len); 2161 void evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2162 2163 // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs) 2164 void psraw(XMMRegister dst, int shift); 2165 void psrad(XMMRegister dst, int shift); 2166 void psraw(XMMRegister dst, XMMRegister shift); 2167 void psrad(XMMRegister dst, XMMRegister shift); 2168 void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2169 void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2170 void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2171 void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2172 void evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len); 2173 void evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2174 2175 void vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2176 void vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len); 2177 2178 // And packed integers 2179 void pand(XMMRegister dst, XMMRegister src); 2180 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2181 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2182 void vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2183 2184 // Andn packed integers 2185 void pandn(XMMRegister dst, XMMRegister src); 2186 void vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2187 2188 // Or packed integers 2189 void por(XMMRegister dst, XMMRegister src); 2190 void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2191 void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2192 void vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2193 2194 // Xor packed integers 2195 void pxor(XMMRegister dst, XMMRegister src); 2196 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2197 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2198 void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 2199 void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 2200 2201 2202 // vinserti forms 2203 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2204 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); 2205 void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2206 void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); 2207 void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2208 2209 // vinsertf forms 2210 void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2211 void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); 2212 void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2213 void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); 2214 void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); 2215 void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); 2216 2217 // vextracti forms 2218 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8); 2219 void vextracti128(Address dst, XMMRegister src, uint8_t imm8); 2220 void vextracti32x4(XMMRegister dst, 
XMMRegister src, uint8_t imm8); 2221 void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8); 2222 void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); 2223 void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8); 2224 void vextracti64x4(Address dst, XMMRegister src, uint8_t imm8); 2225 2226 // vextractf forms 2227 void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8); 2228 void vextractf128(Address dst, XMMRegister src, uint8_t imm8); 2229 void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8); 2230 void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8); 2231 void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); 2232 void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8); 2233 void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8); 2234 2235 // xmm/mem sourced byte/word/dword/qword replicate 2236 void vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len); 2237 void vpbroadcastb(XMMRegister dst, Address src, int vector_len); 2238 void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len); 2239 void vpbroadcastw(XMMRegister dst, Address src, int vector_len); 2240 void vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len); 2241 void vpbroadcastd(XMMRegister dst, Address src, int vector_len); 2242 void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len); 2243 void vpbroadcastq(XMMRegister dst, Address src, int vector_len); 2244 2245 void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len); 2246 void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len); 2247 2248 // scalar single/double precision replicate 2249 void vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len); 2250 void vbroadcastss(XMMRegister dst, Address src, int vector_len); 2251 void vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len); 2252 void vbroadcastsd(XMMRegister dst, Address src, int vector_len); 2253 2254 // gpr sourced byte/word/dword/qword replicate 2255 void evpbroadcastb(XMMRegister dst, Register src, int vector_len); 2256 void evpbroadcastw(XMMRegister dst, Register src, int vector_len); 2257 void evpbroadcastd(XMMRegister dst, Register src, int vector_len); 2258 void evpbroadcastq(XMMRegister dst, Register src, int vector_len); 2259 2260 void evpgatherdd(XMMRegister dst, KRegister k1, Address src, int vector_len); 2261 2262 // Carry-Less Multiplication Quadword 2263 void pclmulqdq(XMMRegister dst, XMMRegister src, int mask); 2264 void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask); 2265 void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len); 2266 // AVX instruction which is used to clear the upper 128 bits of the YMM registers and 2267 // to avoid the transition penalty between the AVX and SSE states. There is no 2268 // penalty if legacy SSE instructions are encoded using the VEX prefix because 2269 // they always clear the upper 128 bits. It should be used before calling 2270 // runtime code and native libraries. 2271 void vzeroupper(); 2272 2273 // AVX support for vectorized conditional move (float/double). The compare and blend instructions below are only used together as a pair (see the usage sketch that follows).
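  // Editor's usage sketch (not part of this interface; the register names a, b, x, y,
  // mask and the predicate value are assumptions, and the operand roles follow the
  // VEX-encoded CMPPD/BLENDVPD forms): a per-lane select such as
  //   dst[i] = (a[i] < b[i]) ? x[i] : y[i]
  // is emitted as a compare that leaves an all-ones/all-zeros mask in each lane,
  // followed by a blend that picks a lane based on the sign bit of the mask lane:
  //
  //   cmppd(mask, a, b, 0x1, vector_len);      // predicate 0x1 = LT
  //   blendvpd(dst, y, x, mask, vector_len);   // mask lane set -> x[i], else y[i]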
2274 void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len); 2275 void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len); 2276 void cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len); 2277 void blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len); 2278 void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len); 2279 2280 protected: 2281 // The following instructions require the address to be 16-byte aligned in SSE mode. 2282 // They should be called only from the corresponding MacroAssembler instructions. 2283 void andpd(XMMRegister dst, Address src); 2284 void andps(XMMRegister dst, Address src); 2285 void xorpd(XMMRegister dst, Address src); 2286 void xorps(XMMRegister dst, Address src); 2287 2288 }; 2289 2290 // The Intel x86/AMD64 Assembler attributes: all fields enclosed here guide encoding-level decisions. 2291 // The specific set functions are for specialized use; otherwise the defaults, or whatever was supplied at 2292 // object construction, are applied. A usage sketch appears at the end of this file. 2293 class InstructionAttr { 2294 public: 2295 InstructionAttr( 2296 int vector_len, // The length of the vector to be applied in encoding - for both AVX and EVEX 2297 bool rex_vex_w, // Width of data: false if 32-bit or less, true if 64-bit or specially defined 2298 bool legacy_mode, // If true, the instruction is restricted to AVX (or earlier) encodings; otherwise EVEX may be chosen 2299 bool no_reg_mask, // When true, k0 is used when EVEX encoding is chosen; otherwise embedded_opmask_register_specifier is used 2300 bool uses_vl) // This instruction may have legacy constraints based on vector length for EVEX 2301 : 2302 _rex_vex_w(rex_vex_w), 2303 _legacy_mode(legacy_mode || UseAVX < 3), 2304 _no_reg_mask(no_reg_mask), 2305 _uses_vl(uses_vl), 2306 _rex_vex_w_reverted(false), 2307 _is_evex_instruction(false), 2308 _is_clear_context(true), 2309 _is_extended_context(false), 2310 _avx_vector_len(vector_len), 2311 _tuple_type(Assembler::EVEX_ETUP), 2312 _input_size_in_bits(Assembler::EVEX_NObit), 2313 _evex_encoding(0), 2314 _embedded_opmask_register_specifier(0), // hard code k0 2315 _current_assembler(NULL) { } 2316 2317 ~InstructionAttr() { 2318 if (_current_assembler != NULL) { 2319 _current_assembler->clear_attributes(); 2320 } 2321 _current_assembler = NULL; 2322 } 2323 2324 private: 2325 bool _rex_vex_w; 2326 bool _legacy_mode; 2327 bool _no_reg_mask; 2328 bool _uses_vl; 2329 bool _rex_vex_w_reverted; 2330 bool _is_evex_instruction; 2331 bool _is_clear_context; 2332 bool _is_extended_context; 2333 int _avx_vector_len; 2334 int _tuple_type; 2335 int _input_size_in_bits; 2336 int _evex_encoding; 2337 int _embedded_opmask_register_specifier; 2338 2339 Assembler *_current_assembler; 2340 2341 public: 2342 // query functions for field accessors 2343 bool is_rex_vex_w(void) const { return _rex_vex_w; } 2344 bool is_legacy_mode(void) const { return _legacy_mode; } 2345 bool is_no_reg_mask(void) const { return _no_reg_mask; } 2346 bool uses_vl(void) const { return _uses_vl; } 2347 bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; } 2348 bool is_evex_instruction(void) const { return _is_evex_instruction; } 2349 bool is_clear_context(void) const { return _is_clear_context; } 2350 bool is_extended_context(void) const { return _is_extended_context; } 2351 int get_vector_len(void) const { return _avx_vector_len; } 2352 int get_tuple_type(void) const { return _tuple_type; }
2353 int get_input_size(void) const { return _input_size_in_bits; } 2354 int get_evex_encoding(void) const { return _evex_encoding; } 2355 int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; } 2356 2357 // Set the vector length manually 2358 void set_vector_len(int vector_len) { _avx_vector_len = vector_len; } 2359 2360 // Mark rex_vex_w as reverted for AVX encoding 2361 void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; } 2362 2363 // Set rex_vex_w based on state 2364 void set_rex_vex_w(bool state) { _rex_vex_w = state; } 2365 2366 // Set the instruction to be encoded in AVX mode 2367 void set_is_legacy_mode(void) { _legacy_mode = true; } 2368 2369 // Set the current instruction to be encoded as an EVEX instruction 2370 void set_is_evex_instruction(void) { _is_evex_instruction = true; } 2371 2372 // Internal encoding data used in compressed immediate offset programming 2373 void set_evex_encoding(int value) { _evex_encoding = value; } 2374 2375 // Reset the EVEX.z field that is used to clear all non-directed XMM/YMM/ZMM components 2376 void reset_is_clear_context(void) { _is_clear_context = false; } 2377 2378 // Map back to the current assembler so that we can manage object-level association 2379 void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; } 2380 2381 // Address modifiers used for compressed displacement calculation 2382 void set_address_attributes(int tuple_type, int input_size_in_bits) { 2383 if (VM_Version::supports_evex()) { 2384 _tuple_type = tuple_type; 2385 _input_size_in_bits = input_size_in_bits; 2386 } 2387 } 2388 2389 // Set embedded opmask register specifier. 2390 void set_embedded_opmask_register_specifier(KRegister mask) { 2391 _embedded_opmask_register_specifier = (*mask).encoding() & 0x7; 2392 } 2393 2394 }; 2395 2396 #endif // CPU_X86_ASSEMBLER_X86_HPP
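// ---------------------------------------------------------------------------
// Editor's usage sketch for InstructionAttr (an illustration, not part of the
// interface above; the vector-length and tuple-type constants are assumed to be
// the Assembler enum values used by the encoding routines in the .cpp file):
//
//   // Describe a 256-bit, EVEX-capable operation that is sensitive to vector length.
//   InstructionAttr attributes(Assembler::AVX_256bit, /* rex_vex_w */ false,
//                              /* legacy_mode */ false, /* no_reg_mask */ true,
//                              /* uses_vl */ true);
//   // Enable EVEX disp8*N compressed displacements for full-vector memory operands.
//   attributes.set_address_attributes(Assembler::EVEX_FV, Assembler::EVEX_32bit);
// ---------------------------------------------------------------------------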