/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP

#include "asm/register.hpp"
#include "vm_version_x86.hpp"

class BiasedLockingCounters;

// Contains all the definitions needed for x86 assembly code generation.

// Calling convention
class Argument VALUE_OBJ_CLASS_SPEC {
 public:
  enum {
#ifdef _LP64
#ifdef _WIN64
    n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
#else
    n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
#endif // _WIN64
    n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
#else
    n_register_parameters = 0          // 0 registers used to pass arguments
#endif // _LP64
  };
};


#ifdef _LP64
// Symbolically name the register arguments used by the c calling convention.
// Windows is different from linux/solaris. So much for standards...

#ifdef _WIN64

REGISTER_DECLARATION(Register, c_rarg0, rcx);
REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);

#else

REGISTER_DECLARATION(Register, c_rarg0, rdi);
REGISTER_DECLARATION(Register, c_rarg1, rsi);
REGISTER_DECLARATION(Register, c_rarg2, rdx);
REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);

#endif // _WIN64

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and has a
// small number of arguments (two fewer args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0   c_rarg1  c_rarg2  c_rarg3  c_rarg4  c_rarg5 |
// |-------------------------------------------------------|
// | rcx       rdx      r8       r9       rdi*     rsi*    | windows (* not a c_rarg)
// | rdi       rsi      rdx      rcx      r8       r9      | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5   j_rarg0  j_rarg1  j_rarg2  j_rarg3  j_rarg4 |
// |-------------------------------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);

REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);

REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile

REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved

#else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that if the dead code is incorrectly live and executed it
// will cause an assertion failure
#define rscratch1 noreg
#define rscratch2 noreg

#endif // _LP64

// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);

// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
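//
// Illustrative examples (not part of the original header) of the constructors
// declared below and the operands they encode:
//
//   Address(rbp, -8)                         // base + disp:               [rbp - 8]
//   Address(rsi, rcx, Address::times_8)      // base + index*scale:        [rsi + rcx*8]
//   Address(rax, rbx, Address::times_4, 16)  // base + index*scale + disp: [rax + rbx*4 + 16]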

class ArrayAddress;

class Address VALUE_OBJ_CLASS_SPEC {
 public:
  enum ScaleFactor {
    no_scale = -1,
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3,
    times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
  };
  static ScaleFactor times(int size) {
    assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
    if (size == 8)  return times_8;
    if (size == 4)  return times_4;
    if (size == 2)  return times_2;
    return times_1;
  }
  static int scale_size(ScaleFactor scale) {
    assert(scale != no_scale, "");
    assert(((1 << (int)times_1) == 1 &&
            (1 << (int)times_2) == 2 &&
            (1 << (int)times_4) == 4 &&
            (1 << (int)times_8) == 8), "");
    return (1 << (int)scale);
  }

 private:
  Register         _base;
  Register         _index;
  ScaleFactor      _scale;
  int              _disp;
  RelocationHolder _rspec;

  // Easily misused constructors make them private
  // %%% can we make these go away?
  NOT_LP64(Address(address loc, RelocationHolder spec);)
  Address(int disp, address loc, relocInfo::relocType rtype);
  Address(int disp, address loc, RelocationHolder spec);

 public:

  int disp() { return _disp; }
  // creation
  Address()
    : _base(noreg),
      _index(noreg),
      _scale(no_scale),
      _disp(0) {
  }

  // No default displacement otherwise Register can be implicitly
  // converted to 0(Register) which is quite a different animal.

  Address(Register base, int disp)
    : _base(base),
      _index(noreg),
      _scale(no_scale),
      _disp(disp) {
  }

  Address(Register base, Register index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(index),
      _scale(scale),
      _disp (disp) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
    : _base (base),
      _index(index.register_or_noreg()),
      _scale(scale),
      _disp (disp + (index.constant_or_zero() * scale_size(scale))) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address plus_disp(int disp) const {
    Address a = (*this);
    a._disp += disp;
    return a;
  }
  Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
    Address a = (*this);
    a._disp += disp.constant_or_zero() * scale_size(scale);
    if (disp.is_register()) {
      assert(!a.index()->is_valid(), "competing indexes");
      a._index = disp.as_register();
      a._scale = scale;
    }
    return a;
  }
  bool is_same_address(Address a) const {
    // disregard _rspec
    return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
  }

  // The following two overloads are used in connection with the
  // ByteSize type (see sizes.hpp). They simplify the use of
  // ByteSize'd arguments in assembly code. Note that their equivalent
  // for the optimized build are the member functions with int disp
  // argument since ByteSize is mapped to an int type in that case.
  //
  // Note: DO NOT introduce similar overloaded functions for WordSize
  // arguments as in the optimized mode, both ByteSize and WordSize
  // are mapped to the same type and thus the compiler cannot make a
  // distinction anymore (=> compiler errors).
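  //
  // A sketch of the intended use, with a hypothetical field accessor
  // Foo::bar_offset() returning a ByteSize:
  //
  //   Address(rdx, Foo::bar_offset())  // debug build: the ByteSize overload below;
  //                                    // optimized build: same as
  //                                    // Address(rdx, in_bytes(Foo::bar_offset()))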

#ifdef ASSERT
  Address(Register base, ByteSize disp)
    : _base(base),
      _index(noreg),
      _scale(no_scale),
      _disp(in_bytes(disp)) {
  }

  Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
    : _base(base),
      _index(index),
      _scale(scale),
      _disp(in_bytes(disp)) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
    : _base (base),
      _index(index.register_or_noreg()),
      _scale(scale),
      _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

#endif // ASSERT

  // accessors
  bool        uses(Register reg) const { return _base == reg || _index == reg; }
  Register    base()             const { return _base;  }
  Register    index()            const { return _index; }
  ScaleFactor scale()            const { return _scale; }
  int         disp()             const { return _disp;  }

  // Convert the raw encoding form into the form expected by the constructor for
  // Address. An index of 4 (rsp) corresponds to having no index, so convert
  // that to noreg for the Address constructor.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);

  static Address make_array(ArrayAddress);

 private:
  bool base_needs_rex() const {
    return _base != noreg && _base->encoding() >= 8;
  }

  bool index_needs_rex() const {
    return _index != noreg && _index->encoding() >= 8;
  }

  relocInfo::relocType reloc() const { return _rspec.type(); }

  friend class Assembler;
  friend class MacroAssembler;
  friend class LIR_Assembler; // base/index/scale/disp
};

//
// AddressLiteral has been split out from Address because operands of this type
// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
// the few instructions that need to deal with address literals are unique and the
// MacroAssembler does not have to implement every instruction in the Assembler
// in order to search for address literals that may need special handling depending
// on the instruction and the platform. A small step on the way to merging the
// i486/amd64 directories.
//
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  // Typically when we use AddressLiterals we want to use their rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to
  // a register to reach it. Otherwise if near we can do rip
  // relative addressing.

  address          _target;

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:


  AddressLiteral(address target, relocInfo::relocType rtype);

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }


 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};

// Convenience classes
class RuntimeAddress: public AddressLiteral {

 public:

  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}

};

class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result.

class ArrayAddress VALUE_OBJ_CLASS_SPEC {
 private:

  AddressLiteral _base;
  Address        _index;

 public:

  ArrayAddress() {};
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
  AddressLiteral base() { return _base; }
  Address index() { return _index; }

};

class InstructionAttr;

// 64-bit reflects the fxsave size, which is 512 bytes, plus the new xsave area
// on EVEX, which is another 2176 bytes.
// See the fxsave and xsave (EVEX enabled) documentation for the layout.
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);

// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.

class Assembler : public AbstractAssembler {
  friend class AbstractAssembler; // for the non-virtual hack
  friend class LIR_Assembler;     // as_Address()
  friend class StubGenerator;

 public:
  enum Condition { // The x86 condition codes used for conditional jumps/moves.
    zero          = 0x4,
    notZero       = 0x5,
    equal         = 0x4,
    notEqual      = 0x5,
    less          = 0xc,
    lessEqual     = 0xe,
    greater       = 0xf,
    greaterEqual  = 0xd,
    below         = 0x2,
    belowEqual    = 0x6,
    above         = 0x7,
    aboveEqual    = 0x3,
    overflow      = 0x0,
    noOverflow    = 0x1,
    carrySet      = 0x2,
    carryClear    = 0x3,
    negative      = 0x8,
    positive      = 0x9,
    parity        = 0xa,
    noParity      = 0xb
  };

  enum Prefix {
    // segment overrides
    CS_segment = 0x2e,
    SS_segment = 0x36,
    DS_segment = 0x3e,
    ES_segment = 0x26,
    FS_segment = 0x64,
    GS_segment = 0x65,

    REX        = 0x40,

    REX_B      = 0x41,
    REX_X      = 0x42,
    REX_XB     = 0x43,
    REX_R      = 0x44,
    REX_RB     = 0x45,
    REX_RX     = 0x46,
    REX_RXB    = 0x47,

    REX_W      = 0x48,

    REX_WB     = 0x49,
    REX_WX     = 0x4A,
    REX_WXB    = 0x4B,
    REX_WR     = 0x4C,
    REX_WRB    = 0x4D,
    REX_WRX    = 0x4E,
    REX_WRXB   = 0x4F,

    VEX_3bytes = 0xC4,
    VEX_2bytes = 0xC5,
    EVEX_4bytes = 0x62,
    Prefix_EMPTY = 0x0
  };

  enum VexPrefix {
    VEX_B = 0x20,
    VEX_X = 0x40,
    VEX_R = 0x80,
    VEX_W = 0x80
  };

  enum EvexPrefix {
    EVEX_F  = 0x04,
    EVEX_V  = 0x08,
    EVEX_Rb = 0x10,
    EVEX_X  = 0x40,
    EVEX_Z  = 0x80
  };

  enum VexSimdPrefix {
    VEX_SIMD_NONE = 0x0,
    VEX_SIMD_66   = 0x1,
    VEX_SIMD_F3   = 0x2,
    VEX_SIMD_F2   = 0x3
  };

  enum VexOpcode {
    VEX_OPCODE_NONE  = 0x0,
    VEX_OPCODE_0F    = 0x1,
    VEX_OPCODE_0F_38 = 0x2,
    VEX_OPCODE_0F_3A = 0x3,
    VEX_OPCODE_MASK  = 0x1F
  };

  enum AvxVectorLen {
    AVX_128bit = 0x0,
    AVX_256bit = 0x1,
    AVX_512bit = 0x2,
    AVX_NoVec  = 0x4
  };

  enum EvexTupleType {
    EVEX_FV   = 0,
    EVEX_HV   = 4,
    EVEX_FVM  = 6,
    EVEX_T1S  = 7,
    EVEX_T1F  = 11,
    EVEX_T2   = 13,
    EVEX_T4   = 15,
    EVEX_T8   = 17,
    EVEX_HVM  = 18,
    EVEX_QVM  = 19,
    EVEX_OVM  = 20,
    EVEX_M128 = 21,
    EVEX_DUP  = 22,
    EVEX_ETUP = 23
  };

  enum EvexInputSizeInBits {
    EVEX_8bit  = 0,
    EVEX_16bit = 1,
    EVEX_32bit = 2,
    EVEX_64bit = 3,
    EVEX_NObit = 4
  };

  enum WhichOperand {
    // input to locate_operand, and format code for relocations
    imm_operand    = 0,        // embedded 32-bit|64-bit immediate operand
    disp32_operand = 1,        // embedded 32-bit displacement or address
    call32_operand = 2,        // embedded 32-bit self-relative displacement
#ifndef _LP64
    _WhichOperand_limit = 3
#else
    narrow_oop_operand = 3,    // embedded 32-bit immediate narrow oop
    _WhichOperand_limit = 4
#endif
  };

  enum ComparisonPredicate {
    eq = 0,
    lt = 1,
    le = 2,
    _false = 3,
    neq = 4,
    nlt = 5,
    nle = 6,
    _true = 7
  };


  // NOTE: The general philosophy of the declarations here is that 64bit versions
  // of instructions are freely declared without the need for wrapping them in an ifdef.
  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
  // In the .cpp file the implementations are wrapped so that they are dropped out
  // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
  // to the size it was prior to merging up the 32bit and 64bit assemblers.
  //
  // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
  // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
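  //
  // For example (illustrative, not part of the original header): addl() below is
  // implemented for both 32bit and 64bit builds, while its addq() counterpart is
  // declared here unconditionally but compiled only into the 64bit jvm, so 32bit
  // code that reaches an addq() call fails at link/run time as described above.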

 private:

  bool _legacy_mode_bw;
  bool _legacy_mode_dq;
  bool _legacy_mode_vl;
  bool _legacy_mode_vlbw;
  bool _is_managed;
  bool _vector_masking;    // For stub code use only

  class InstructionAttr *_attributes;

  // 64bit prefixes
  int prefix_and_encode(int reg_enc, bool byteinst = false);
  int prefixq_and_encode(int reg_enc);

  int prefix_and_encode(int dst_enc, int src_enc) {
    return prefix_and_encode(dst_enc, false, src_enc, false);
  }
  int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
  int prefixq_and_encode(int dst_enc, int src_enc);

  void prefix(Register reg);
  void prefix(Register dst, Register src, Prefix p);
  void prefix(Register dst, Address adr, Prefix p);
  void prefix(Address adr);
  void prefixq(Address adr);

  void prefix(Address adr, Register reg, bool byteinst = false);
  void prefix(Address adr, XMMRegister reg);
  void prefixq(Address adr, Register reg);
  void prefixq(Address adr, XMMRegister reg);

  void prefetch_prefix(Address src);

  void rex_prefix(Address adr, XMMRegister xreg,
                  VexSimdPrefix pre, VexOpcode opc, bool rex_w);
  int  rex_prefix_and_encode(int dst_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc, bool rex_w);

  void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
                   int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void vex_prefix(Address adr, int nds_enc, int xreg_enc,
                  VexSimdPrefix pre, VexOpcode opc,
                  InstructionAttr *attributes);

  int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                            VexSimdPrefix pre, VexOpcode opc,
                            InstructionAttr *attributes);

  void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                   VexOpcode opc, InstructionAttr *attributes);

  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                             VexOpcode opc, InstructionAttr *attributes);

  // Helper functions for groups of instructions
  void emit_arith_b(int op1, int op2, Register dst, int imm8);

  void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
  void emit_arith(int op1, int op2, Register dst, Register src);

  bool emit_compressed_disp_byte(int &disp);

  void emit_operand(Register reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec,
                    int rip_relative_correction = 0);

  void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);

  // operands that only take the original 32bit registers
  void emit_operand32(Register reg, Address adr);

  void emit_operand(XMMRegister reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec);

  void emit_operand(XMMRegister reg, Address adr);

  void emit_operand(MMXRegister reg, Address adr);

  // workaround gcc (3.2.1-7) bug
  void emit_operand(Address adr, MMXRegister reg);


  // Immediate-to-memory forms
  void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);

  void emit_farith(int b1, int b2, int i);


 protected:
#ifdef ASSERT
  void check_relocation(RelocationHolder const& rspec, int format);
#endif

  void emit_data(jint data, relocInfo::relocType    rtype, int format);
  void emit_data(jint data, RelocationHolder const& rspec, int format);
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

  bool reachable(AddressLiteral adr) NOT_LP64({ return true;});

  // These are all easily abused and hence protected

  // 32BIT ONLY SECTION
#ifndef _LP64
  // Make these disappear in 64bit mode since they would never be correct
  void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
  void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY

  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY

  void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
#else
  // 64BIT ONLY SECTION
  void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY

  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);

  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64

  // These are unique in that we are ensured by the caller that the 32bit
  // relative in these instructions will always be able to reach the potentially
  // 64bit address described by entry. Since they can take a 64bit address they
  // don't have the 32 suffix like the other instructions in this class.

  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);

  // "Avoid using directly" section.
  // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the following inc() and dec() methods directly. INC & DEC instructions
  // could cause a partial flag stall since they don't set the CF flag.
  // Use the MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.

  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);

  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);

  // New cpus require use of movsd and movss to avoid partial register stall
  // when loading from memory. But for old Opteron use movlpd instead of movsd.
  // The selection is done in MacroAssembler::movdbl() and movflt().

  // Move Scalar Single-Precision Floating-Point Values
  void movss(XMMRegister dst, Address src);
  void movss(XMMRegister dst, XMMRegister src);
  void movss(Address dst, XMMRegister src);

  // Move Scalar Double-Precision Floating-Point Values
  void movsd(XMMRegister dst, Address src);
  void movsd(XMMRegister dst, XMMRegister src);
  void movsd(Address dst, XMMRegister src);
  void movlpd(XMMRegister dst, Address src);

  // New cpus require use of movaps and movapd to avoid partial register stall
  // when moving between registers.
  void movaps(XMMRegister dst, XMMRegister src);
  void movapd(XMMRegister dst, XMMRegister src);

  // End avoid using directly


  // Instruction prefixes
  void prefix(Prefix p);

 public:

  // Creation
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    init_attributes();
  }

  // Decoding
  static address locate_operand(address inst, WhichOperand which);
  static address locate_next_instruction(address inst);

  // Utilities
  static bool is_polling_page_far() NOT_LP64({ return false;});
  static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                         int cur_tuple_type, int in_size_in_bits, int cur_encoding);

  // Generic instructions
  // Does 32bit or 64bit as needed for the platform. In some sense these
  // belong in macro assembler but there is no need for both varieties to exist

  void init_attributes(void) {
    _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
    _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
    _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
    _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
    _is_managed = false;
    _vector_masking = false;
    _attributes = NULL;
  }

  void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
  void clear_attributes(void) { _attributes = NULL; }

  void set_managed(void) { _is_managed = true; }
  void clear_managed(void) { _is_managed = false; }
  bool is_managed(void) { return _is_managed; }

  // Following functions are for stub code use only
  void set_vector_masking(void) { _vector_masking = true; }
  void clear_vector_masking(void) { _vector_masking = false; }
  bool is_vector_masking(void) { return _vector_masking; }

  void lea(Register dst, Address src);

  void mov(Register dst, Register src);

  void pusha();
  void popa();

  void pushf();
  void popf();

  void push(int32_t imm32);

  void push(Register src);

  void pop(Register dst);

  // These are dummies to prevent surprise implicit conversions to Register
  void push(void* v);
  void pop(void* v);

  // These do register sized moves/scans
  void rep_mov();
  void rep_stos();
  void rep_stosb();
  void repne_scan();
#ifdef _LP64
  void repne_scanl();
#endif

  // Vanilla instructions in lexical order

  void adcl(Address dst, int32_t imm32);
  void adcl(Address dst, Register src);
  void adcl(Register dst, int32_t imm32);
  void adcl(Register dst, Address src);
  void adcl(Register dst, Register src);

  void adcq(Register dst, int32_t imm32);
  void adcq(Register dst, Address src);
  void adcq(Register dst, Register src);

  void addl(Address dst, int32_t imm32);
  void addl(Address dst, Register src);
  void addl(Register dst, int32_t imm32);
  void addl(Register dst, Address src);
  void addl(Register dst, Register src);

  void addq(Address dst, int32_t imm32);
  void addq(Address dst, Register src);
  void addq(Register dst, int32_t imm32);
  void addq(Register dst, Address src);
  void addq(Register dst, Register src);

#ifdef _LP64
  // Add Unsigned Integers with Carry Flag
  void adcxq(Register dst, Register src);

  // Add Unsigned Integers with Overflow Flag
  void adoxq(Register dst, Register src);
#endif

  void addr_nop_4();
  void addr_nop_5();
  void addr_nop_7();
  void addr_nop_8();

  // Add Scalar Double-Precision Floating-Point Values
  void addsd(XMMRegister dst, Address src);
  void addsd(XMMRegister dst, XMMRegister src);

  // Add Scalar Single-Precision Floating-Point Values
  void addss(XMMRegister dst, Address src);
  void addss(XMMRegister dst, XMMRegister src);

  // AES instructions
  void aesdec(XMMRegister dst, Address src);
  void aesdec(XMMRegister dst, XMMRegister src);
  void aesdeclast(XMMRegister dst, Address src);
  void aesdeclast(XMMRegister dst, XMMRegister src);
  void aesenc(XMMRegister dst, Address src);
  void aesenc(XMMRegister dst, XMMRegister src);
  void aesenclast(XMMRegister dst, Address src);
  void aesenclast(XMMRegister dst, XMMRegister src);


  void andl(Address dst, int32_t imm32);
  void andl(Register dst, int32_t imm32);
  void andl(Register dst, Address src);
  void andl(Register dst, Register src);

  void andq(Address dst, int32_t imm32);
  void andq(Register dst, int32_t imm32);
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);

  // BMI instructions
  void andnl(Register dst, Register src1, Register src2);
  void andnl(Register dst, Register src1, Address src2);
  void andnq(Register dst, Register src1, Register src2);
  void andnq(Register dst, Register src1, Address src2);

  void blsil(Register dst, Register src);
  void blsil(Register dst, Address src);
  void blsiq(Register dst, Register src);
  void blsiq(Register dst, Address src);

  void blsmskl(Register dst, Register src);
  void blsmskl(Register dst, Address src);
  void blsmskq(Register dst, Register src);
  void blsmskq(Register dst, Address src);

  void blsrl(Register dst, Register src);
  void blsrl(Register dst, Address src);
  void blsrq(Register dst, Register src);
  void blsrq(Register dst, Address src);

  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);

#ifdef _LP64
  void bsfq(Register dst, Register src);
  void bsrq(Register dst, Register src);
#endif

  void bswapl(Register reg);

  void bswapq(Register reg);

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register reg);  // push pc; pc <- reg
  void call(Address adr);   // push pc; pc <- adr

  void cdql();

  void cdqq();

  void cld();

  void clflush(Address adr);

  void cmovl(Condition cc, Register dst, Register src);
  void cmovl(Condition cc, Register dst, Address src);

  void cmovq(Condition cc, Register dst, Register src);
  void cmovq(Condition cc, Register dst, Address src);


  void cmpb(Address dst, int imm8);

  void cmpl(Address dst, int32_t imm32);

  void cmpl(Register dst, int32_t imm32);
  void cmpl(Register dst, Register src);
  void cmpl(Register dst, Address src);

  void cmpq(Address dst, int32_t imm32);
  void cmpq(Address dst, Register src);

  void cmpq(Register dst, int32_t imm32);
  void cmpq(Register dst, Register src);
  void cmpq(Register dst, Address src);

  // these are dummies used to catch attempting to convert NULL to Register
  void cmpl(Register dst, void* junk); // dummy
  void cmpq(Register dst, void* junk); // dummy

  void cmpw(Address dst, int imm16);

  void cmpxchg8 (Address adr);

  void cmpxchgb(Register reg, Address adr);
  void cmpxchgl(Register reg, Address adr);

  void cmpxchgq(Register reg, Address adr);

  // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void comisd(XMMRegister dst, Address src);
  void comisd(XMMRegister dst, XMMRegister src);

  // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void comiss(XMMRegister dst, Address src);
  void comiss(XMMRegister dst, XMMRegister src);

  // Identify processor type and features
  void cpuid();

  // CRC32C
  void crc32(Register crc, Register v, int8_t sizeInBytes);
  void crc32(Register crc, Address adr, int8_t sizeInBytes);

  // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
  void cvtsd2ss(XMMRegister dst, XMMRegister src);
  void cvtsd2ss(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
  void cvtsi2sdl(XMMRegister dst, Register src);
  void cvtsi2sdl(XMMRegister dst, Address src);
  void cvtsi2sdq(XMMRegister dst, Register src);
  void cvtsi2sdq(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
  void cvtsi2ssl(XMMRegister dst, Register src);
  void cvtsi2ssl(XMMRegister dst, Address src);
  void cvtsi2ssq(XMMRegister dst, Register src);
  void cvtsi2ssq(XMMRegister dst, Address src);

  // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
  void cvtdq2pd(XMMRegister dst, XMMRegister src);

  // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
  void cvtdq2ps(XMMRegister dst, XMMRegister src);

  // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
  void cvtss2sd(XMMRegister dst, XMMRegister src);
  void cvtss2sd(XMMRegister dst, Address src);

  // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
  void cvttsd2sil(Register dst, Address src);
  void cvttsd2sil(Register dst, XMMRegister src);
  void cvttsd2siq(Register dst, XMMRegister src);

  // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
  void cvttss2sil(Register dst, XMMRegister src);
  void cvttss2siq(Register dst, XMMRegister src);

  void cvttpd2dq(XMMRegister dst, XMMRegister src);

  // Divide Scalar Double-Precision Floating-Point Values
  void divsd(XMMRegister dst, Address src);
  void divsd(XMMRegister dst, XMMRegister src);

  // Divide Scalar Single-Precision Floating-Point Values
  void divss(XMMRegister dst, Address src);
  void divss(XMMRegister dst, XMMRegister src);

  void emms();

  void fabs();

  void fadd(int i);

  void fadd_d(Address src);
  void fadd_s(Address src);

  // "Alternate" versions of x87 instructions place result down in FPU
  // stack instead of on TOS

  void fadda(int i); // "alternate" fadd
  void faddp(int i = 1);

  void fchs();

  void fcom(int i);

  void fcomp(int i = 1);
  void fcomp_d(Address src);
  void fcomp_s(Address src);

  void fcompp();

  void fcos();

  void fdecstp();

  void fdiv(int i);
  void fdiv_d(Address src);
  void fdivr_s(Address src);
  void fdiva(int i);  // "alternate" fdiv
  void fdivp(int i = 1);

  void fdivr(int i);
  void fdivr_d(Address src);
  void fdiv_s(Address src);

  void fdivra(int i); // "alternate" reversed fdiv

  void fdivrp(int i = 1);

  void ffree(int i = 0);

  void fild_d(Address adr);
  void fild_s(Address adr);

  void fincstp();

  void finit();

  void fist_s (Address adr);
  void fistp_d(Address adr);
  void fistp_s(Address adr);

  void fld1();

  void fld_d(Address adr);
  void fld_s(Address adr);
  void fld_s(int index);
  void fld_x(Address adr);  // extended-precision (80-bit) format

  void fldcw(Address src);

  void fldenv(Address src);

  void fldlg2();

  void fldln2();

  void fldz();

  void flog();
  void flog10();

  void fmul(int i);

  void fmul_d(Address src);
  void fmul_s(Address src);

  void fmula(int i);  // "alternate" fmul

  void fmulp(int i = 1);

  void fnsave(Address dst);

  void fnstcw(Address src);

  void fnstsw_ax();

  void fprem();
  void fprem1();

  void frstor(Address src);

  void fsin();

  void fsqrt();

  void fst_d(Address adr);
  void fst_s(Address adr);

  void fstp_d(Address adr);
  void fstp_d(int index);
  void fstp_s(Address adr);
  void fstp_x(Address adr); // extended-precision (80-bit) format

  void fsub(int i);
  void fsub_d(Address src);
  void fsub_s(Address src);

  void fsuba(int i);  // "alternate" fsub

  void fsubp(int i = 1);

  void fsubr(int i);
  void fsubr_d(Address src);
  void fsubr_s(Address src);

  void fsubra(int i); // "alternate" reversed fsub

  void fsubrp(int i = 1);

  void ftan();

  void ftst();

  void fucomi(int i = 1);
  void fucomip(int i = 1);

  void fwait();

  void fxch(int i = 1);

  void fxrstor(Address src);
  void xrstor(Address src);

  void fxsave(Address dst);
  void xsave(Address dst);

  void fyl2x();
  void frndint();
  void f2xm1();
  void fldl2e();

  void hlt();

  void idivl(Register src);
  void divl(Register src); // Unsigned division

#ifdef _LP64
  void idivq(Register src);
#endif

  void imull(Register src);
  void imull(Register dst, Register src);
  void imull(Register dst, Register src, int value);
  void imull(Register dst, Address src);

#ifdef _LP64
  void imulq(Register dst, Register src);
  void imulq(Register dst, Register src, int value);
  void imulq(Register dst, Address src);
#endif

  // jcc is the generic conditional branch generator to run-time
  // routines; jcc is used for branches to labels. jcc
  // takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;      // unbound label
  // jcc(cc, L);   // forward branch to unbound label
  // bind(L);      // bind label to the current pc
  // jcc(cc, L);   // backward branch to bound label
  // bind(L);      // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void jcc(Condition cc, Label& L, bool maybe_short = true);

  // Conditional jump to an 8-bit offset to L.
  // WARNING: be very careful using this for forward jumps. If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.
  void jccb(Condition cc, Label& L);

  void jmp(Address entry);    // pc <- entry

  // Label operations & relative jumps (PPUM Appendix D)
  void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L

  void jmp(Register entry); // pc <- entry

  // Unconditional 8-bit offset jump to L.
  // WARNING: be very careful using this for forward jumps. If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.
  void jmpb(Label& L);

  void ldmxcsr( Address src );

  void leal(Register dst, Address src);

  void leaq(Register dst, Address src);

  void lfence();

  void lock();

  void lzcntl(Register dst, Register src);

#ifdef _LP64
  void lzcntq(Register dst, Register src);
#endif

  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

  // Serializes memory and blows flags
  void membar(Membar_mask_bits order_constraint) {
    if (os::is_MP()) {
      // We only have to handle StoreLoad
      if (order_constraint & StoreLoad) {
        // All usable chips support "locked" instructions which suffice
        // as barriers, and are much faster than the alternative of
        // using the cpuid instruction. We use here a locked add [esp-C],0.
        // This is conveniently otherwise a no-op except for blowing
        // flags, and introducing a false dependency on target memory
        // location. We can't do anything with flags, but we can avoid
        // memory dependencies in the current method by locked-adding
        // somewhere else on the stack. Doing [esp+C] will collide with
        // something on stack in current method, hence we go for [esp-C].
        // It is convenient since it is almost always in data cache, for
        // any small C. We need to step back from SP to avoid data
        // dependencies with other things on below SP (callee-saves, for
        // example). Without a clear way to figure out the minimal safe
        // distance from SP, it makes sense to step back the complete
        // cache line, as this will also avoid possible second-order effects
        // with locked ops against the cache line. Our choice of offset
        // is bounded by x86 operand encoding, which should stay within
        // [-128; +127] to have the 8-bit displacement encoding.
        //
        // Any change to this code may need to revisit other places in
        // the code where this idiom is used, in particular the
        // orderAccess code.

        int offset = -VM_Version::L1_line_size();
        if (offset < -128) {
          offset = -128;
        }

        lock();
        addl(Address(rsp, offset), 0); // Assert the lock# signal here
      }
    }
  }

  void mfence();

  // Moves

  void mov64(Register dst, int64_t imm64);

  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);

  void movddup(XMMRegister dst, XMMRegister src);

  void kmovbl(KRegister dst, Register src);
  void kmovbl(Register dst, KRegister src);
  void kmovwl(KRegister dst, Register src);
  void kmovwl(KRegister dst, Address src);
  void kmovwl(Register dst, KRegister src);
  void kmovdl(KRegister dst, Register src);
  void kmovdl(Register dst, KRegister src);
  void kmovql(KRegister dst, KRegister src);
  void kmovql(Address dst, KRegister src);
  void kmovql(KRegister dst, Address src);
  void kmovql(KRegister dst, Register src);
  void kmovql(Register dst, KRegister src);

  void knotwl(KRegister dst, KRegister src);

  void kortestbl(KRegister dst, KRegister src);
  void kortestwl(KRegister dst, KRegister src);
  void kortestdl(KRegister dst, KRegister src);
  void kortestql(KRegister dst, KRegister src);

  void ktestq(KRegister src1, KRegister src2);
  void ktestd(KRegister src1, KRegister src2);

  void ktestql(KRegister dst, KRegister src);

  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);

  // Move Double Quadword
  void movdq(XMMRegister dst, Register src);
  void movdq(Register dst, XMMRegister src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src);
  void movdqa(XMMRegister dst, Address src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 256bit Vector
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 512bit Vector
  void evmovdqub(Address dst, XMMRegister src, int vector_len);
  void evmovdqub(XMMRegister dst, Address src, int vector_len);
  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len);
  void evmovdquw(Address dst, XMMRegister src, int vector_len);
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len);
  void evmovdquw(XMMRegister dst, Address src, int vector_len);
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len);
  void evmovdqul(Address dst, XMMRegister src, int vector_len);
  void evmovdqul(XMMRegister dst, Address src, int vector_len);
  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdquq(Address dst, XMMRegister src, int vector_len);
  void evmovdquq(XMMRegister dst, Address src, int vector_len);
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);

  // Move lower 64bit to high 64bit in 128bit register
  void movlhps(XMMRegister dst, XMMRegister src);

  void movl(Register dst, int32_t imm32);
  void movl(Address dst, int32_t imm32);
  void movl(Register dst, Register src);
  void movl(Register dst, Address src);
  void movl(Address dst, Register src);

  // These dummies prevent using movl from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movl(Address dst, void* junk);
  void movl(Register dst, void* junk);

#ifdef _LP64
  void movq(Register dst, Register src);
  void movq(Register dst, Address src);
  void movq(Address dst, Register src);
#endif

  void movq(Address dst, MMXRegister src );
  void movq(MMXRegister dst, Address src );

#ifdef _LP64
  // These dummies prevent using movq from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movq(Address dst, void* dummy);
  void movq(Register dst, void* dummy);
#endif

  // Move Quadword
  void movq(Address dst, XMMRegister src);
  void movq(XMMRegister dst, Address src);

  void movsbl(Register dst, Address src);
  void movsbl(Register dst, Register src);

#ifdef _LP64
  void movsbq(Register dst, Address src);
  void movsbq(Register dst, Register src);

  // Move signed 32bit immediate to 64bit extending sign
  void movslq(Address dst, int32_t imm64);
  void movslq(Register dst, int32_t imm64);

  void movslq(Register dst, Address src);
  void movslq(Register dst, Register src);
  void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
#endif

  void movswl(Register dst, Address src);
  void movswl(Register dst, Register src);

#ifdef _LP64
  void movswq(Register dst, Address src);
  void movswq(Register dst, Register src);
#endif

  void movw(Address dst, int imm16);
  void movw(Register dst, Address src);
  void movw(Address dst, Register src);

  void movzbl(Register dst, Address src);
  void movzbl(Register dst, Register src);

#ifdef _LP64
  void movzbq(Register dst, Address src);
  void movzbq(Register dst, Register src);
#endif

  void movzwl(Register dst, Address src);
  void movzwl(Register dst, Register src);

#ifdef _LP64
  void movzwq(Register dst, Address src);
  void movzwq(Register dst, Register src);
#endif

  // Unsigned multiply with RAX destination register
  void mull(Address src);
  void mull(Register src);

#ifdef _LP64
  void mulq(Address src);
  void mulq(Register src);
  void mulxq(Register dst1, Register dst2, Register src);
#endif

  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  void nop(int i = 1);

  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Permutation of 64bit words
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermq(XMMRegister dst, XMMRegister src, int imm8);
  void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);

  void pause();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
  void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
  void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);

  void pcmpeqw(XMMRegister dst, XMMRegister src);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pcmpeqd(XMMRegister dst, XMMRegister src);
  void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pcmpeqq(XMMRegister dst, XMMRegister src);
  void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pmovmskb(Register dst, XMMRegister src);
  void vpmovmskb(Register dst, XMMRegister src);

  // SSE 4.1 extract
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);
  void pextrd(Address dst, XMMRegister src, int imm8);
  void pextrq(Address dst, XMMRegister src, int imm8);
  void pextrb(Address dst, XMMRegister src, int imm8);
  // SSE 2 extract
  void pextrw(Register dst, XMMRegister src, int imm8);
  void pextrw(Address dst, XMMRegister src, int imm8);

  // SSE 4.1 insert
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);
  void pinsrd(XMMRegister dst, Address src, int imm8);
  void pinsrq(XMMRegister dst, Address src, int imm8);
  void pinsrb(XMMRegister dst, Address src, int imm8);
  // SSE 2 insert
  void pinsrw(XMMRegister dst, Register src, int imm8);
  void pinsrw(XMMRegister dst, Address src, int imm8);

  // SSE4.1 packed move
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);

  void evpmovwb(Address dst, XMMRegister src, int vector_len);
  void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);

#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif

#ifdef _LP64
  void popq(Address dst);
#endif

  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);

#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif

  // Prefetches (SSE, SSE2, 3DNOW only)

  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);
  void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src,     int mode);
  void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);

  // Shuffle Packed Low Words
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src,     int mode);

  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);

  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);

  // Interleave Low Quadwords
  void punpcklqdq(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif

  void pushq(Address src);

  void rcll(Register dst, int imm8);

  void rclq(Register dst, int imm8);

  void rcrq(Register dst, int imm8);

  void rcpps(XMMRegister dst, XMMRegister src);

  void rcpss(XMMRegister dst, XMMRegister src);

  void rdtsc();

  void ret(int imm16);

#ifdef _LP64
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
  void rorxd(Register dst, Register src, int imm8);
#endif

  void sahf();

  void sarl(Register dst, int imm8);
  void sarl(Register dst);

  void sarq(Register dst, int imm8);
  void sarq(Register dst);
  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);

  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);

  // Interleave Low Quadwords
  void punpcklqdq(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif

  void pushq(Address src);

  void rcll(Register dst, int imm8);

  void rclq(Register dst, int imm8);

  void rcrq(Register dst, int imm8);

  void rcpps(XMMRegister dst, XMMRegister src);

  void rcpss(XMMRegister dst, XMMRegister src);

  void rdtsc();

  void ret(int imm16);

#ifdef _LP64
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
  void rorxd(Register dst, Register src, int imm8);
#endif

  void sahf();

  void sarl(Register dst, int imm8);
  void sarl(Register dst);

  void sarq(Register dst, int imm8);
  void sarq(Register dst);

  void sbbl(Address dst, int32_t imm32);
  void sbbl(Register dst, int32_t imm32);
  void sbbl(Register dst, Address src);
  void sbbl(Register dst, Register src);

  void sbbq(Address dst, int32_t imm32);
  void sbbq(Register dst, int32_t imm32);
  void sbbq(Register dst, Address src);
  void sbbq(Register dst, Register src);

  void setb(Condition cc, Register dst);

  void palignr(XMMRegister dst, XMMRegister src, int imm8);
  void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);

  void pblendw(XMMRegister dst, XMMRegister src, int imm8);

  void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
  void sha1nexte(XMMRegister dst, XMMRegister src);
  void sha1msg1(XMMRegister dst, XMMRegister src);
  void sha1msg2(XMMRegister dst, XMMRegister src);
  // xmm0 is an implicit additional source for the following instruction.
  void sha256rnds2(XMMRegister dst, XMMRegister src);
  void sha256msg1(XMMRegister dst, XMMRegister src);
  void sha256msg2(XMMRegister dst, XMMRegister src);

  void shldl(Register dst, Register src);
  void shldl(Register dst, Register src, int8_t imm8);

  void shll(Register dst, int imm8);
  void shll(Register dst);

  void shlq(Register dst, int imm8);
  void shlq(Register dst);

  void shrdl(Register dst, Register src);

  void shrl(Register dst, int imm8);
  void shrl(Register dst);

  void shrq(Register dst, int imm8);
  void shrq(Register dst);

  void smovl(); // QQQ generic?

  // Compute Square Root of Scalar Double-Precision Floating-Point Value
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);

  void std();

  void stmxcsr(Address dst);

  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);

  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);

  // Force generation of a 4-byte immediate value even if it would fit into 8 bits
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);
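  // The regular forms let the encoder pick the short sign-extended imm8
  // encoding (opcode 0x83) for small constants, so the instruction length
  // depends on the value. Sites that need a fixed-length instruction (for
  // example, an immediate that is patched later) use the *_imm32 variants,
  // which always emit the full imm32 encoding (opcode 0x81). Illustrative
  // sketch only, assuming an emitter "masm":
  //
  //   masm.subl(rdx, 16);         // may encode as 83 /5 ib   (3 bytes)
  //   masm.subl_imm32(rdx, 16);   // always encodes as 81 /5 id (6 bytes)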
  // Subtract Scalar Double-Precision Floating-Point Values
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);

  // Subtract Scalar Single-Precision Floating-Point Values
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);

  void testb(Register dst, int imm8);
  void testb(Address dst, int imm8);

  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);

  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);

  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);

  // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);

  void xabort(int8_t imm8);

  void xaddl(Address dst, Register src);

  void xaddq(Address dst, Register src);

  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);

  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);

  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);

  void xend();

  // Get Value of Extended Control Register
  void xgetbv();

  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);

  void xorb(Register dst, Address src);

  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);

  void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0

  // AVX 3-operand scalar instructions (encoded with VEX prefix)

  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
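  // Unlike their two-operand SSE counterparts, these VEX-encoded forms are
  // non-destructive: the first source (nds) is left intact. Illustrative
  // contrast, assuming an emitter "masm":
  //
  //   masm.subsd(xmm1, xmm2);           // SSE: xmm1 = xmm1 - xmm2 (xmm1 clobbered)
  //   masm.vsubsd(xmm0, xmm1, xmm2);    // AVX: xmm0 = xmm1 - xmm2 (xmm1 preserved)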
  void shlxl(Register dst, Register src1, Register src2);
  void shlxq(Register dst, Register src1, Register src2);

  //====================VECTOR ARITHMETIC=====================================

  // Add Packed Floating-Point Values
  void addpd(XMMRegister dst, XMMRegister src);
  void addpd(XMMRegister dst, Address src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Subtract Packed Floating-Point Values
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply Packed Floating-Point Values
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulpd(XMMRegister dst, Address src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Divide Packed Floating-Point Values
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sqrt Packed Floating-Point Values - Double precision only
  void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtpd(XMMRegister dst, Address src, int vector_len);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void unpckhpd(XMMRegister dst, XMMRegister src);
  void unpcklpd(XMMRegister dst, XMMRegister src);

  // Bitwise Logical XOR of Packed Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  // Add horizontal packed integers
  void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void phaddw(XMMRegister dst, XMMRegister src);
  void phaddd(XMMRegister dst, XMMRegister src);

  // Add packed integers
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, Address src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sub packed integers
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply packed integers (shorts and ints only; vpmullq additionally requires AVX-512DQ)
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Shift left packed integers
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Logical shift right packed integers
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // And packed integers
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Andn packed integers
  void pandn(XMMRegister dst, XMMRegister src);

  // Or packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Xor packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  // vinserti forms
  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);

  // vinsertf forms
  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);

  // vextracti forms
  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);

  // vextractf forms
  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
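  // In the insert/extract forms above, imm8 selects the lane of the wider
  // register: for the 128-bit forms, lane 0 is bits 127:0 and lane 1 is bits
  // 255:128. An illustrative round trip, assuming AVX2 and an emitter "masm":
  //
  //   masm.vextracti128(xmm1, xmm2, 1);        // xmm1 = upper 128-bit lane of ymm2
  //   masm.vinserti128(xmm2, xmm2, xmm1, 0);   // lower lane of ymm2 = xmm1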
  // legacy xmm sourced word/dword replicate
  void vpbroadcastw(XMMRegister dst, XMMRegister src);
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // xmm/mem sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);

  // scalar single/double precision replicate
  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);

  // gpr sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Register src, int vector_len);

  // Carry-Less Multiplication Quadword
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);

  // AVX instruction that clears the upper 128 bits of all YMM registers. It is
  // used to avoid the transition penalty between the AVX and SSE states. There
  // is no penalty if legacy SSE instructions are encoded with a VEX prefix,
  // because those always clear the upper 128 bits. It should be used before
  // calling runtime code and native libraries (see the sketch after this class).
  void vzeroupper();

  // AVX support for vectorized conditional move (double). The following two
  // instructions are only ever used as a pair (see the sketch after this class).
  void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
  void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);

 protected:
  // The following instructions require the memory address to be 16-byte
  // aligned in SSE mode. They should be called only from the corresponding
  // MacroAssembler instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);

};
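// An illustrative (non-normative) sketch of the two idioms called out above,
// assuming AVX support, an emitter "masm", and a vector_len value such as
// Assembler::AVX_256bit; see assembler_x86.cpp for the exact operand roles.
//
//   // Flush dirty upper YMM state before transferring to SSE-only code:
//   masm.vzeroupper();
//
//   // Vectorized conditional move: cmppd materializes an all-ones/all-zeros
//   // mask per lane (predicate 0x1 = less-than), which vpblendd then uses to
//   // select between the two sources lane by lane:
//   masm.cmppd(xmm3, xmm1, xmm2, 0x1, vector_len);
//   masm.vpblendd(xmm0, xmm1, xmm2, xmm3, vector_len);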
// The Intel x86/AMD64 assembler attributes: all fields enclosed here guide
// encoding-level decisions. The specific set functions are for specialized
// use; otherwise the defaults, or whatever was supplied at object
// construction, apply.
class InstructionAttr {
public:
  InstructionAttr(
    int vector_len,     // The vector length to apply in encoding - for both AVX and EVEX
    bool rex_vex_w,     // Width of data: false for 32 bits or less, true for 64-bit or specially defined
    bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX may be chosen
    bool no_reg_mask,   // If true, k0 is used when an EVEX encoding is chosen; otherwise k1 is used
    bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
    :
    _avx_vector_len(vector_len),
    _rex_vex_w(rex_vex_w),
    _rex_vex_w_reverted(false),
    _legacy_mode(legacy_mode),
    _no_reg_mask(no_reg_mask),
    _uses_vl(uses_vl),
    _tuple_type(Assembler::EVEX_ETUP),
    _input_size_in_bits(Assembler::EVEX_NObit),
    _is_evex_instruction(false),
    _evex_encoding(0),
    _is_clear_context(false),
    _is_extended_context(false),
    _current_assembler(NULL),
    _embedded_opmask_register_specifier(1) { // hard-coded to k1 for now; see set_embedded_opmask_register_specifier()
    if (UseAVX < 3) _legacy_mode = true;
  }

  ~InstructionAttr() {
    if (_current_assembler != NULL) {
      _current_assembler->clear_attributes();
    }
    _current_assembler = NULL;
  }
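  // An illustrative (non-normative) construction, as an emit routine might set
  // one up for a 256-bit AVX instruction with 32-bit inputs (the enum values
  // AVX_256bit, EVEX_FV and EVEX_32bit are assumed from this file's enums):
  //
  //   InstructionAttr attributes(AVX_256bit, /* rex_vex_w */ false,
  //                              /* legacy_mode */ false, /* no_reg_mask */ false,
  //                              /* uses_vl */ true);
  //   attributes.set_address_attributes(/* tuple_type */ EVEX_FV,
  //                                     /* input_size_in_bits */ EVEX_32bit);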
private:
  int  _avx_vector_len;
  bool _rex_vex_w;
  bool _rex_vex_w_reverted;
  bool _legacy_mode;
  bool _no_reg_mask;
  bool _uses_vl;
  int  _tuple_type;
  int  _input_size_in_bits;
  bool _is_evex_instruction;
  int  _evex_encoding;
  bool _is_clear_context;
  bool _is_extended_context;
  int  _embedded_opmask_register_specifier;

  Assembler *_current_assembler;

public:
  // query functions for field accessors
  int  get_vector_len(void) const { return _avx_vector_len; }
  bool is_rex_vex_w(void) const { return _rex_vex_w; }
  bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
  bool is_legacy_mode(void) const { return _legacy_mode; }
  bool is_no_reg_mask(void) const { return _no_reg_mask; }
  bool uses_vl(void) const { return _uses_vl; }
  int  get_tuple_type(void) const { return _tuple_type; }
  int  get_input_size(void) const { return _input_size_in_bits; }
  bool is_evex_instruction(void) const { return _is_evex_instruction; }
  int  get_evex_encoding(void) const { return _evex_encoding; }
  bool is_clear_context(void) const { return _is_clear_context; }
  bool is_extended_context(void) const { return _is_extended_context; }
  int  get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }

  // Set the vector length manually
  void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }

  // Mark rex_vex_w as reverted for AVX encoding
  void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }

  // Set rex_vex_w based on state
  void set_rex_vex_w(bool state) { _rex_vex_w = state; }

  // Force the instruction to be encoded in AVX (or earlier) mode
  void set_is_legacy_mode(void) { _legacy_mode = true; }

  // Mark the current instruction to be encoded as an EVEX instruction
  void set_is_evex_instruction(void) { _is_evex_instruction = true; }

  // Internal encoding data used in compressed immediate offset programming
  void set_evex_encoding(int value) { _evex_encoding = value; }

  // Set the EVEX.z field, which clears all non-directed XMM/YMM/ZMM components
  void set_is_clear_context(void) { _is_clear_context = true; }

  // Map back to the current assembler so that we can manage object-level association
  void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }

  // Address modifiers used for the EVEX compressed displacement (disp8*N)
  // calculation: the tuple type and the input size together determine the
  // scale N by which an 8-bit displacement is multiplied.
  void set_address_attributes(int tuple_type, int input_size_in_bits) {
    if (VM_Version::supports_evex()) {
      _tuple_type = tuple_type;
      _input_size_in_bits = input_size_in_bits;
    }
  }

  // Set embedded opmask register specifier.
  void set_embedded_opmask_register_specifier(KRegister mask) {
    _embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
  }

};

#endif // CPU_X86_VM_ASSEMBLER_X86_HPP