1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP 26 #define CPU_X86_VM_ASSEMBLER_X86_HPP 27 28 #include "asm/register.hpp" 29 #include "vm_version_x86.hpp" 30 31 class BiasedLockingCounters; 32 33 // Contains all the definitions needed for x86 assembly code generation. 34 35 // Calling convention 36 class Argument VALUE_OBJ_CLASS_SPEC { 37 public: 38 enum { 39 #ifdef _LP64 40 #ifdef _WIN64 41 n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 42 n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) 43 #else 44 n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 45 n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) 46 #endif // _WIN64 47 n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... 48 n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... 49 #else 50 n_register_parameters = 0 // 0 registers used to pass arguments 51 #endif // _LP64 52 }; 53 }; 54 55 56 #ifdef _LP64 57 // Symbolically name the register arguments used by the c calling convention. 58 // Windows is different from linux/solaris. So much for standards... 59 60 #ifdef _WIN64 61 62 REGISTER_DECLARATION(Register, c_rarg0, rcx); 63 REGISTER_DECLARATION(Register, c_rarg1, rdx); 64 REGISTER_DECLARATION(Register, c_rarg2, r8); 65 REGISTER_DECLARATION(Register, c_rarg3, r9); 66 67 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 68 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 69 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 70 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 71 72 #else 73 74 REGISTER_DECLARATION(Register, c_rarg0, rdi); 75 REGISTER_DECLARATION(Register, c_rarg1, rsi); 76 REGISTER_DECLARATION(Register, c_rarg2, rdx); 77 REGISTER_DECLARATION(Register, c_rarg3, rcx); 78 REGISTER_DECLARATION(Register, c_rarg4, r8); 79 REGISTER_DECLARATION(Register, c_rarg5, r9); 80 81 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 82 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 83 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 84 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 85 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); 86 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); 87 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); 88 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); 89 90 #endif // _WIN64 91 92 // Symbolically name the register arguments used by the Java calling convention. 
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do any shuffling. A suitable jni method is non-static and takes a
// small number of arguments (two fewer args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0   c_rarg1  c_rarg2 c_rarg3 c_rarg4 c_rarg5    |
// |-------------------------------------------------------|
// | rcx       rdx      r8      r9      rdi*    rsi*       | windows (* not a c_rarg)
// | rdi       rsi      rdx     rcx     r8      r9         | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5   j_rarg0  j_rarg1 j_rarg2 j_rarg3 j_rarg4    |
// |-------------------------------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);

REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);

REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile

REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved

#else
// rscratch1 will appear in 32bit code that is dead but of course must compile
// Using noreg ensures if the dead code is incorrectly live and executed it
// will cause an assertion failure
#define rscratch1  noreg
#define rscratch2  noreg

#endif // _LP64

// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);

// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
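//
// For example (illustrative only -- these are the addressing modes the
// constructors of Address below accept):
//
//   Address(rbx, 8)                          // [rbx + 8]
//   Address(rsi, rcx, Address::times_4)      // [rsi + rcx*4]
//   Address(rsp, rdx, Address::times_8, 16)  // [rsp + rdx*8 + 16]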
155 156 class ArrayAddress; 157 158 class Address VALUE_OBJ_CLASS_SPEC { 159 public: 160 enum ScaleFactor { 161 no_scale = -1, 162 times_1 = 0, 163 times_2 = 1, 164 times_4 = 2, 165 times_8 = 3, 166 times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) 167 }; 168 static ScaleFactor times(int size) { 169 assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); 170 if (size == 8) return times_8; 171 if (size == 4) return times_4; 172 if (size == 2) return times_2; 173 return times_1; 174 } 175 static int scale_size(ScaleFactor scale) { 176 assert(scale != no_scale, ""); 177 assert(((1 << (int)times_1) == 1 && 178 (1 << (int)times_2) == 2 && 179 (1 << (int)times_4) == 4 && 180 (1 << (int)times_8) == 8), ""); 181 return (1 << (int)scale); 182 } 183 184 private: 185 Register _base; 186 Register _index; 187 ScaleFactor _scale; 188 int _disp; 189 RelocationHolder _rspec; 190 191 // Easily misused constructors make them private 192 // %%% can we make these go away? 193 NOT_LP64(Address(address loc, RelocationHolder spec);) 194 Address(int disp, address loc, relocInfo::relocType rtype); 195 Address(int disp, address loc, RelocationHolder spec); 196 197 public: 198 199 int disp() { return _disp; } 200 // creation 201 Address() 202 : _base(noreg), 203 _index(noreg), 204 _scale(no_scale), 205 _disp(0) { 206 } 207 208 // No default displacement otherwise Register can be implicitly 209 // converted to 0(Register) which is quite a different animal. 210 211 Address(Register base, int disp) 212 : _base(base), 213 _index(noreg), 214 _scale(no_scale), 215 _disp(disp) { 216 } 217 218 Address(Register base, Register index, ScaleFactor scale, int disp = 0) 219 : _base (base), 220 _index(index), 221 _scale(scale), 222 _disp (disp) { 223 assert(!index->is_valid() == (scale == Address::no_scale), 224 "inconsistent address"); 225 } 226 227 Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0) 228 : _base (base), 229 _index(index.register_or_noreg()), 230 _scale(scale), 231 _disp (disp + (index.constant_or_zero() * scale_size(scale))) { 232 if (!index.is_register()) scale = Address::no_scale; 233 assert(!_index->is_valid() == (scale == Address::no_scale), 234 "inconsistent address"); 235 } 236 237 Address plus_disp(int disp) const { 238 Address a = (*this); 239 a._disp += disp; 240 return a; 241 } 242 Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { 243 Address a = (*this); 244 a._disp += disp.constant_or_zero() * scale_size(scale); 245 if (disp.is_register()) { 246 assert(!a.index()->is_valid(), "competing indexes"); 247 a._index = disp.as_register(); 248 a._scale = scale; 249 } 250 return a; 251 } 252 bool is_same_address(Address a) const { 253 // disregard _rspec 254 return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; 255 } 256 257 // The following two overloads are used in connection with the 258 // ByteSize type (see sizes.hpp). They simplify the use of 259 // ByteSize'd arguments in assembly code. Note that their equivalent 260 // for the optimized build are the member functions with int disp 261 // argument since ByteSize is mapped to an int type in that case. 262 // 263 // Note: DO NOT introduce similar overloaded functions for WordSize 264 // arguments as in the optimized mode, both ByteSize and WordSize 265 // are mapped to the same type and thus the compiler cannot make a 266 // distinction anymore (=> compiler errors). 
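  //
  // For example (illustrative sketch; any_field_offset() is a hypothetical
  // accessor returning a ByteSize, not a real one):
  //
  //   Address(rdx, SomeClass::any_field_offset())  // ByteSize overload in debug builds,
  //                                                // plain int overload in the optimized build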

#ifdef ASSERT
  Address(Register base, ByteSize disp)
    : _base(base),
      _index(noreg),
      _scale(no_scale),
      _disp(in_bytes(disp)) {
  }

  Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
    : _base(base),
      _index(index),
      _scale(scale),
      _disp(in_bytes(disp)) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
    : _base (base),
      _index(index.register_or_noreg()),
      _scale(scale),
      _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
    if (!index.is_register()) scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

#endif // ASSERT

  // accessors
  bool        uses(Register reg) const { return _base == reg || _index == reg; }
  Register    base()             const { return _base;  }
  Register    index()            const { return _index; }
  ScaleFactor scale()            const { return _scale; }
  int         disp()             const { return _disp;  }

  // Convert the raw encoding form into the form expected by the constructor for
  // Address. An index of 4 (rsp) corresponds to having no index, so convert
  // that to noreg for the Address constructor.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);

  static Address make_array(ArrayAddress);

 private:
  bool base_needs_rex() const {
    return _base != noreg && _base->encoding() >= 8;
  }

  bool index_needs_rex() const {
    return _index != noreg && _index->encoding() >= 8;
  }

  relocInfo::relocType reloc() const { return _rspec.type(); }

  friend class Assembler;
  friend class MacroAssembler;
  friend class LIR_Assembler; // base/index/scale/disp
};

//
// AddressLiteral has been split out from Address because operands of this type
// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
// the few instructions that need to deal with address literals are unique and the
// MacroAssembler does not have to implement every instruction in the Assembler
// in order to search for address literals that may need special handling depending
// on the instruction and the platform. A small step on the way to merging the
// i486/amd64 directories.
//
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  // Typically when we use an AddressLiteral we want its rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to
  // a register to reach it. Otherwise if near we can do rip
  // relative addressing.

  address          _target;

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:


  AddressLiteral(address target, relocInfo::relocType rtype);

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }


 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};

// Convenience classes
class RuntimeAddress: public AddressLiteral {

 public:

  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}

};

class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

// 32-bit x86 can do array addressing as a single operation since disp can be an
// absolute address; amd64 can't. We create a class that expresses the concept
// but does extra magic on amd64 to get the final result.

class ArrayAddress VALUE_OBJ_CLASS_SPEC {
 private:

  AddressLiteral _base;
  Address        _index;

 public:

  ArrayAddress() {};
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
  AddressLiteral base() { return _base; }
  Address index() { return _index; }

};

const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512*2 / wordSize);

// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.

class Assembler : public AbstractAssembler  {
  friend class AbstractAssembler; // for the non-virtual hack
  friend class LIR_Assembler;     // as_Address()
  friend class StubGenerator;

 public:
  enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
454 zero = 0x4, 455 notZero = 0x5, 456 equal = 0x4, 457 notEqual = 0x5, 458 less = 0xc, 459 lessEqual = 0xe, 460 greater = 0xf, 461 greaterEqual = 0xd, 462 below = 0x2, 463 belowEqual = 0x6, 464 above = 0x7, 465 aboveEqual = 0x3, 466 overflow = 0x0, 467 noOverflow = 0x1, 468 carrySet = 0x2, 469 carryClear = 0x3, 470 negative = 0x8, 471 positive = 0x9, 472 parity = 0xa, 473 noParity = 0xb 474 }; 475 476 enum Prefix { 477 // segment overrides 478 CS_segment = 0x2e, 479 SS_segment = 0x36, 480 DS_segment = 0x3e, 481 ES_segment = 0x26, 482 FS_segment = 0x64, 483 GS_segment = 0x65, 484 485 REX = 0x40, 486 487 REX_B = 0x41, 488 REX_X = 0x42, 489 REX_XB = 0x43, 490 REX_R = 0x44, 491 REX_RB = 0x45, 492 REX_RX = 0x46, 493 REX_RXB = 0x47, 494 495 REX_W = 0x48, 496 497 REX_WB = 0x49, 498 REX_WX = 0x4A, 499 REX_WXB = 0x4B, 500 REX_WR = 0x4C, 501 REX_WRB = 0x4D, 502 REX_WRX = 0x4E, 503 REX_WRXB = 0x4F, 504 505 VEX_3bytes = 0xC4, 506 VEX_2bytes = 0xC5, 507 EVEX_4bytes = 0x62, 508 Prefix_EMPTY = 0x0 509 }; 510 511 enum VexPrefix { 512 VEX_B = 0x20, 513 VEX_X = 0x40, 514 VEX_R = 0x80, 515 VEX_W = 0x80 516 }; 517 518 enum ExexPrefix { 519 EVEX_F = 0x04, 520 EVEX_V = 0x08, 521 EVEX_Rb = 0x10, 522 EVEX_X = 0x40, 523 EVEX_Z = 0x80 524 }; 525 526 enum VexSimdPrefix { 527 VEX_SIMD_NONE = 0x0, 528 VEX_SIMD_66 = 0x1, 529 VEX_SIMD_F3 = 0x2, 530 VEX_SIMD_F2 = 0x3 531 }; 532 533 enum VexOpcode { 534 VEX_OPCODE_NONE = 0x0, 535 VEX_OPCODE_0F = 0x1, 536 VEX_OPCODE_0F_38 = 0x2, 537 VEX_OPCODE_0F_3A = 0x3 538 }; 539 540 enum AvxVectorLen { 541 AVX_128bit = 0x0, 542 AVX_256bit = 0x1, 543 AVX_512bit = 0x2, 544 AVX_NoVec = 0x4 545 }; 546 547 enum EvexTupleType { 548 EVEX_FV = 0, 549 EVEX_HV = 4, 550 EVEX_FVM = 6, 551 EVEX_T1S = 7, 552 EVEX_T1F = 11, 553 EVEX_T2 = 13, 554 EVEX_T4 = 15, 555 EVEX_T8 = 17, 556 EVEX_HVM = 18, 557 EVEX_QVM = 19, 558 EVEX_OVM = 20, 559 EVEX_M128 = 21, 560 EVEX_DUP = 22, 561 EVEX_ETUP = 23 562 }; 563 564 enum EvexInputSizeInBits { 565 EVEX_8bit = 0, 566 EVEX_16bit = 1, 567 EVEX_32bit = 2, 568 EVEX_64bit = 3 569 }; 570 571 enum WhichOperand { 572 // input to locate_operand, and format code for relocations 573 imm_operand = 0, // embedded 32-bit|64-bit immediate operand 574 disp32_operand = 1, // embedded 32-bit displacement or address 575 call32_operand = 2, // embedded 32-bit self-relative displacement 576 #ifndef _LP64 577 _WhichOperand_limit = 3 578 #else 579 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop 580 _WhichOperand_limit = 4 581 #endif 582 }; 583 584 585 586 // NOTE: The general philopsophy of the declarations here is that 64bit versions 587 // of instructions are freely declared without the need for wrapping them an ifdef. 588 // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) 589 // In the .cpp file the implementations are wrapped so that they are dropped out 590 // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL 591 // to the size it was prior to merging up the 32bit and 64bit assemblers. 592 // 593 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction 594 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. 
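
// For reference (an illustrative note, derived from the Prefix enum above):
// the prefix-emission helpers below build REX bytes from the base value 0x40
// plus the W/R/X/B bits in bit positions 3..0, e.g.
//
//   REX_WRB = 0x40 | 0x08 (W) | 0x04 (R) | 0x01 (B) = 0x4D
//   REX_RX  = 0x40 | 0x04 (R) | 0x02 (X)            = 0x46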
595 596 private: 597 598 int evex_encoding; 599 int input_size_in_bits; 600 int avx_vector_len; 601 int tuple_type; 602 bool is_evex_instruction; 603 604 // 64bit prefixes 605 int prefix_and_encode(int reg_enc, bool byteinst = false); 606 int prefixq_and_encode(int reg_enc); 607 608 int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); 609 int prefixq_and_encode(int dst_enc, int src_enc); 610 611 void prefix(Register reg); 612 void prefix(Register dst, Register src, Prefix p); 613 void prefix(Register dst, Address adr, Prefix p); 614 void prefix(Address adr); 615 void prefixq(Address adr); 616 617 void prefix(Address adr, Register reg, bool byteinst = false); 618 void prefix(Address adr, XMMRegister reg); 619 void prefixq(Address adr, Register reg); 620 void prefixq(Address adr, XMMRegister reg); 621 622 void prefetch_prefix(Address src); 623 624 void rex_prefix(Address adr, XMMRegister xreg, 625 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 626 int rex_prefix_and_encode(int dst_enc, int src_enc, 627 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 628 629 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, 630 int nds_enc, VexSimdPrefix pre, VexOpcode opc, 631 int vector_len); 632 633 void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v, 634 int nds_enc, VexSimdPrefix pre, VexOpcode opc, 635 bool is_extended_context, bool is_merge_context, 636 int vector_len, bool no_mask_reg ); 637 638 void vex_prefix(Address adr, int nds_enc, int xreg_enc, 639 VexSimdPrefix pre, VexOpcode opc, 640 bool vex_w, int vector_len, 641 bool legacy_mode = false, bool no_mask_reg = false); 642 643 void vex_prefix(XMMRegister dst, XMMRegister nds, Address src, 644 VexSimdPrefix pre, int vector_len = AVX_128bit, 645 bool no_mask_reg = false, bool legacy_mode = false) { 646 int dst_enc = dst->encoding(); 647 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 648 vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector_len, legacy_mode, no_mask_reg); 649 } 650 651 void vex_prefix_q(XMMRegister dst, XMMRegister nds, Address src, 652 VexSimdPrefix pre, int vector_len = AVX_128bit, 653 bool no_mask_reg = false) { 654 int dst_enc = dst->encoding(); 655 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 656 vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg); 657 } 658 659 void vex_prefix_0F38(Register dst, Register nds, Address src, bool no_mask_reg = false) { 660 bool vex_w = false; 661 int vector_len = AVX_128bit; 662 vex_prefix(src, nds->encoding(), dst->encoding(), 663 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, 664 vector_len, no_mask_reg); 665 } 666 667 void vex_prefix_0F38_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) { 668 bool vex_w = false; 669 int vector_len = AVX_128bit; 670 vex_prefix(src, nds->encoding(), dst->encoding(), 671 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, 672 vector_len, true, no_mask_reg); 673 } 674 675 void vex_prefix_0F38_q(Register dst, Register nds, Address src, bool no_mask_reg = false) { 676 bool vex_w = true; 677 int vector_len = AVX_128bit; 678 vex_prefix(src, nds->encoding(), dst->encoding(), 679 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, 680 vector_len, no_mask_reg); 681 } 682 683 void vex_prefix_0F38_q_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) { 684 bool vex_w = true; 685 int vector_len = AVX_128bit; 686 vex_prefix(src, nds->encoding(), dst->encoding(), 687 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, 688 vector_len, true, no_mask_reg); 689 } 690 691 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, 692 VexSimdPrefix pre, VexOpcode opc, 693 bool vex_w, int vector_len, 694 bool legacy_mode, bool no_mask_reg); 695 696 int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src, bool no_mask_reg = false) { 697 bool vex_w = false; 698 int vector_len = AVX_128bit; 699 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), 700 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len, 701 false, no_mask_reg); 702 } 703 704 int vex_prefix_0F38_and_encode_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) { 705 bool vex_w = false; 706 int vector_len = AVX_128bit; 707 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), 708 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len, 709 true, no_mask_reg); 710 } 711 712 int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src, bool no_mask_reg = false) { 713 bool vex_w = true; 714 int vector_len = AVX_128bit; 715 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), 716 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len, 717 false, no_mask_reg); 718 } 719 720 int vex_prefix_0F38_and_encode_q_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) { 721 bool vex_w = true; 722 int vector_len = AVX_128bit; 723 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), 724 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len, 725 true, no_mask_reg); 726 } 727 728 int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, 729 VexSimdPrefix pre, int vector_len = AVX_128bit, 730 VexOpcode opc = VEX_OPCODE_0F, bool legacy_mode = false, 731 bool no_mask_reg = false) { 732 int src_enc = src->encoding(); 733 int dst_enc = dst->encoding(); 734 int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; 735 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector_len, legacy_mode, no_mask_reg); 736 } 737 738 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, 739 VexSimdPrefix pre, bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F, 740 bool rex_w = false, int vector_len = AVX_128bit, bool legacy_mode = false); 741 742 void simd_prefix(XMMRegister dst, Address src, VexSimdPrefix pre, 743 bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F) { 744 simd_prefix(dst, xnoreg, src, pre, no_mask_reg, opc); 745 } 746 747 void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) { 748 simd_prefix(src, dst, pre, no_mask_reg); 749 } 750 void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src, 751 VexSimdPrefix pre, bool no_mask_reg = false) { 752 bool rex_w = true; 753 simd_prefix(dst, nds, src, pre, no_mask_reg, VEX_OPCODE_0F, rex_w); 754 } 755 756 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, 757 VexSimdPrefix pre, bool no_mask_reg, 758 VexOpcode opc = VEX_OPCODE_0F, 759 bool rex_w = false, int vector_len = AVX_128bit, 760 bool legacy_mode = false); 761 762 int kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, 763 VexSimdPrefix pre, bool no_mask_reg, 764 VexOpcode opc = VEX_OPCODE_0F, 765 bool rex_w = false, int vector_len = AVX_128bit); 766 767 int kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, 768 VexSimdPrefix pre, bool no_mask_reg, 769 VexOpcode opc = VEX_OPCODE_0F, 770 bool rex_w = false, int vector_len = AVX_128bit); 771 772 // Move/convert 32-bit integer value. 773 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src, 774 VexSimdPrefix pre, bool no_mask_reg) { 775 // It is OK to cast from Register to XMMRegister to pass argument here 776 // since only encoding is used in simd_prefix_and_encode() and number of 777 // Gen and Xmm registers are the same. 778 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F); 779 } 780 int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) { 781 return simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg); 782 } 783 int simd_prefix_and_encode(Register dst, XMMRegister src, 784 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, 785 bool no_mask_reg = false) { 786 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc); 787 } 788 789 // Move/convert 64-bit integer value. 
790 int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src, 791 VexSimdPrefix pre, bool no_mask_reg = false) { 792 bool rex_w = true; 793 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F, rex_w); 794 } 795 int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) { 796 return simd_prefix_and_encode_q(dst, xnoreg, src, pre, no_mask_reg); 797 } 798 int simd_prefix_and_encode_q(Register dst, XMMRegister src, 799 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, 800 bool no_mask_reg = false) { 801 bool rex_w = true; 802 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc, rex_w); 803 } 804 805 // Helper functions for groups of instructions 806 void emit_arith_b(int op1, int op2, Register dst, int imm8); 807 808 void emit_arith(int op1, int op2, Register dst, int32_t imm32); 809 // Force generation of a 4 byte immediate value even if it fits into 8bit 810 void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32); 811 void emit_arith(int op1, int op2, Register dst, Register src); 812 813 void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false); 814 void emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false); 815 void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false); 816 void emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false); 817 void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false); 818 void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false); 819 void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false); 820 void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false); 821 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, 822 Address src, VexSimdPrefix pre, int vector_len, 823 bool no_mask_reg = false, bool legacy_mode = false); 824 void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, 825 Address src, VexSimdPrefix pre, int vector_len, 826 bool no_mask_reg = false); 827 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, 828 XMMRegister src, VexSimdPrefix pre, int vector_len, 829 bool no_mask_reg = false, bool legacy_mode = false); 830 void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds, 831 XMMRegister src, VexSimdPrefix pre, int vector_len, 832 bool no_mask_reg = false); 833 834 bool emit_compressed_disp_byte(int &disp); 835 836 void emit_operand(Register reg, 837 Register base, Register index, Address::ScaleFactor scale, 838 int disp, 839 RelocationHolder const& rspec, 840 int rip_relative_correction = 0); 841 842 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0); 843 844 // operands that only take the original 32bit registers 845 void emit_operand32(Register reg, Address adr); 846 847 void emit_operand(XMMRegister reg, 848 Register base, Register index, Address::ScaleFactor scale, 849 int disp, 850 RelocationHolder const& rspec); 851 852 void emit_operand(XMMRegister reg, Address adr); 853 854 void 
emit_operand(MMXRegister reg, Address adr);

  // workaround gcc (3.2.1-7) bug
  void emit_operand(Address adr, MMXRegister reg);


  // Immediate-to-memory forms
  void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);

  void emit_farith(int b1, int b2, int i);


 protected:
#ifdef ASSERT
  void check_relocation(RelocationHolder const& rspec, int format);
#endif

  void emit_data(jint data, relocInfo::relocType rtype, int format);
  void emit_data(jint data, RelocationHolder const& rspec, int format);
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

  bool reachable(AddressLiteral adr) NOT_LP64({ return true;});

  // These are all easily abused and hence protected

  // 32BIT ONLY SECTION
#ifndef _LP64
  // Make these disappear in 64bit mode since they would never be correct
  void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
  void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY

  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY

  void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
#else
  // 64BIT ONLY SECTION
  void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY

  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);

  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64

  // These are unique in that we are ensured by the caller that the 32bit
  // relative in these instructions will always be able to reach the potentially
  // 64bit address described by entry. Since they can take a 64bit address they
  // don't have the 32 suffix like the other instructions in this class.

  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);

  // Avoid using directly section
  // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the next inc() and dec() methods directly. INC & DEC instructions
  // could cause a partial flag stall since they don't set the CF flag.
  // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.

  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);

  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);

  // New cpus require use of movsd and movss to avoid partial register stall
  // when loading from memory. But for old Opteron use movlpd instead of movsd.
933 // The selection is done in MacroAssembler::movdbl() and movflt(). 934 935 // Move Scalar Single-Precision Floating-Point Values 936 void movss(XMMRegister dst, Address src); 937 void movss(XMMRegister dst, XMMRegister src); 938 void movss(Address dst, XMMRegister src); 939 940 // Move Scalar Double-Precision Floating-Point Values 941 void movsd(XMMRegister dst, Address src); 942 void movsd(XMMRegister dst, XMMRegister src); 943 void movsd(Address dst, XMMRegister src); 944 void movlpd(XMMRegister dst, Address src); 945 946 // New cpus require use of movaps and movapd to avoid partial register stall 947 // when moving between registers. 948 void movaps(XMMRegister dst, XMMRegister src); 949 void movapd(XMMRegister dst, XMMRegister src); 950 951 // End avoid using directly 952 953 954 // Instruction prefixes 955 void prefix(Prefix p); 956 957 public: 958 959 // Creation 960 Assembler(CodeBuffer* code) : AbstractAssembler(code) { 961 init_attributes(); 962 } 963 964 // Decoding 965 static address locate_operand(address inst, WhichOperand which); 966 static address locate_next_instruction(address inst); 967 968 // Utilities 969 static bool is_polling_page_far() NOT_LP64({ return false;}); 970 static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, 971 int cur_tuple_type, int in_size_in_bits, int cur_encoding); 972 973 // Generic instructions 974 // Does 32bit or 64bit as needed for the platform. In some sense these 975 // belong in macro assembler but there is no need for both varieties to exist 976 977 void init_attributes(void) { 978 evex_encoding = 0; 979 input_size_in_bits = 0; 980 avx_vector_len = AVX_NoVec; 981 tuple_type = EVEX_ETUP; 982 is_evex_instruction = false; 983 } 984 985 void lea(Register dst, Address src); 986 987 void mov(Register dst, Register src); 988 989 void pusha(); 990 void popa(); 991 992 void pushf(); 993 void popf(); 994 995 void push(int32_t imm32); 996 997 void push(Register src); 998 999 void pop(Register dst); 1000 1001 // These are dummies to prevent surprise implicit conversions to Register 1002 void push(void* v); 1003 void pop(void* v); 1004 1005 // These do register sized moves/scans 1006 void rep_mov(); 1007 void rep_stos(); 1008 void rep_stosb(); 1009 void repne_scan(); 1010 #ifdef _LP64 1011 void repne_scanl(); 1012 #endif 1013 1014 // Vanilla instructions in lexical order 1015 1016 void adcl(Address dst, int32_t imm32); 1017 void adcl(Address dst, Register src); 1018 void adcl(Register dst, int32_t imm32); 1019 void adcl(Register dst, Address src); 1020 void adcl(Register dst, Register src); 1021 1022 void adcq(Register dst, int32_t imm32); 1023 void adcq(Register dst, Address src); 1024 void adcq(Register dst, Register src); 1025 1026 void addl(Address dst, int32_t imm32); 1027 void addl(Address dst, Register src); 1028 void addl(Register dst, int32_t imm32); 1029 void addl(Register dst, Address src); 1030 void addl(Register dst, Register src); 1031 1032 void addq(Address dst, int32_t imm32); 1033 void addq(Address dst, Register src); 1034 void addq(Register dst, int32_t imm32); 1035 void addq(Register dst, Address src); 1036 void addq(Register dst, Register src); 1037 1038 #ifdef _LP64 1039 //Add Unsigned Integers with Carry Flag 1040 void adcxq(Register dst, Register src); 1041 1042 //Add Unsigned Integers with Overflow Flag 1043 void adoxq(Register dst, Register src); 1044 #endif 1045 1046 void addr_nop_4(); 1047 void addr_nop_5(); 1048 void addr_nop_7(); 1049 void addr_nop_8(); 1050 1051 // Add Scalar 
Double-Precision Floating-Point Values 1052 void addsd(XMMRegister dst, Address src); 1053 void addsd(XMMRegister dst, XMMRegister src); 1054 1055 // Add Scalar Single-Precision Floating-Point Values 1056 void addss(XMMRegister dst, Address src); 1057 void addss(XMMRegister dst, XMMRegister src); 1058 1059 // AES instructions 1060 void aesdec(XMMRegister dst, Address src); 1061 void aesdec(XMMRegister dst, XMMRegister src); 1062 void aesdeclast(XMMRegister dst, Address src); 1063 void aesdeclast(XMMRegister dst, XMMRegister src); 1064 void aesenc(XMMRegister dst, Address src); 1065 void aesenc(XMMRegister dst, XMMRegister src); 1066 void aesenclast(XMMRegister dst, Address src); 1067 void aesenclast(XMMRegister dst, XMMRegister src); 1068 1069 1070 void andl(Address dst, int32_t imm32); 1071 void andl(Register dst, int32_t imm32); 1072 void andl(Register dst, Address src); 1073 void andl(Register dst, Register src); 1074 1075 void andq(Address dst, int32_t imm32); 1076 void andq(Register dst, int32_t imm32); 1077 void andq(Register dst, Address src); 1078 void andq(Register dst, Register src); 1079 1080 // BMI instructions 1081 void andnl(Register dst, Register src1, Register src2); 1082 void andnl(Register dst, Register src1, Address src2); 1083 void andnq(Register dst, Register src1, Register src2); 1084 void andnq(Register dst, Register src1, Address src2); 1085 1086 void blsil(Register dst, Register src); 1087 void blsil(Register dst, Address src); 1088 void blsiq(Register dst, Register src); 1089 void blsiq(Register dst, Address src); 1090 1091 void blsmskl(Register dst, Register src); 1092 void blsmskl(Register dst, Address src); 1093 void blsmskq(Register dst, Register src); 1094 void blsmskq(Register dst, Address src); 1095 1096 void blsrl(Register dst, Register src); 1097 void blsrl(Register dst, Address src); 1098 void blsrq(Register dst, Register src); 1099 void blsrq(Register dst, Address src); 1100 1101 void bsfl(Register dst, Register src); 1102 void bsrl(Register dst, Register src); 1103 1104 #ifdef _LP64 1105 void bsfq(Register dst, Register src); 1106 void bsrq(Register dst, Register src); 1107 #endif 1108 1109 void bswapl(Register reg); 1110 1111 void bswapq(Register reg); 1112 1113 void call(Label& L, relocInfo::relocType rtype); 1114 void call(Register reg); // push pc; pc <- reg 1115 void call(Address adr); // push pc; pc <- adr 1116 1117 void cdql(); 1118 1119 void cdqq(); 1120 1121 void cld(); 1122 1123 void clflush(Address adr); 1124 1125 void cmovl(Condition cc, Register dst, Register src); 1126 void cmovl(Condition cc, Register dst, Address src); 1127 1128 void cmovq(Condition cc, Register dst, Register src); 1129 void cmovq(Condition cc, Register dst, Address src); 1130 1131 1132 void cmpb(Address dst, int imm8); 1133 1134 void cmpl(Address dst, int32_t imm32); 1135 1136 void cmpl(Register dst, int32_t imm32); 1137 void cmpl(Register dst, Register src); 1138 void cmpl(Register dst, Address src); 1139 1140 void cmpq(Address dst, int32_t imm32); 1141 void cmpq(Address dst, Register src); 1142 1143 void cmpq(Register dst, int32_t imm32); 1144 void cmpq(Register dst, Register src); 1145 void cmpq(Register dst, Address src); 1146 1147 // these are dummies used to catch attempting to convert NULL to Register 1148 void cmpl(Register dst, void* junk); // dummy 1149 void cmpq(Register dst, void* junk); // dummy 1150 1151 void cmpw(Address dst, int imm16); 1152 1153 void cmpxchg8 (Address adr); 1154 1155 void cmpxchgb(Register reg, Address adr); 1156 void cmpxchgl(Register 
reg, Address adr); 1157 1158 void cmpxchgq(Register reg, Address adr); 1159 1160 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS 1161 void comisd(XMMRegister dst, Address src); 1162 void comisd(XMMRegister dst, XMMRegister src); 1163 1164 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS 1165 void comiss(XMMRegister dst, Address src); 1166 void comiss(XMMRegister dst, XMMRegister src); 1167 1168 // Identify processor type and features 1169 void cpuid(); 1170 1171 // CRC32C 1172 void crc32(Register crc, Register v, int8_t sizeInBytes); 1173 void crc32(Register crc, Address adr, int8_t sizeInBytes); 1174 1175 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value 1176 void cvtsd2ss(XMMRegister dst, XMMRegister src); 1177 void cvtsd2ss(XMMRegister dst, Address src); 1178 1179 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value 1180 void cvtsi2sdl(XMMRegister dst, Register src); 1181 void cvtsi2sdl(XMMRegister dst, Address src); 1182 void cvtsi2sdq(XMMRegister dst, Register src); 1183 void cvtsi2sdq(XMMRegister dst, Address src); 1184 1185 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value 1186 void cvtsi2ssl(XMMRegister dst, Register src); 1187 void cvtsi2ssl(XMMRegister dst, Address src); 1188 void cvtsi2ssq(XMMRegister dst, Register src); 1189 void cvtsi2ssq(XMMRegister dst, Address src); 1190 1191 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value 1192 void cvtdq2pd(XMMRegister dst, XMMRegister src); 1193 1194 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value 1195 void cvtdq2ps(XMMRegister dst, XMMRegister src); 1196 1197 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value 1198 void cvtss2sd(XMMRegister dst, XMMRegister src); 1199 void cvtss2sd(XMMRegister dst, Address src); 1200 1201 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer 1202 void cvttsd2sil(Register dst, Address src); 1203 void cvttsd2sil(Register dst, XMMRegister src); 1204 void cvttsd2siq(Register dst, XMMRegister src); 1205 1206 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer 1207 void cvttss2sil(Register dst, XMMRegister src); 1208 void cvttss2siq(Register dst, XMMRegister src); 1209 1210 // Divide Scalar Double-Precision Floating-Point Values 1211 void divsd(XMMRegister dst, Address src); 1212 void divsd(XMMRegister dst, XMMRegister src); 1213 1214 // Divide Scalar Single-Precision Floating-Point Values 1215 void divss(XMMRegister dst, Address src); 1216 void divss(XMMRegister dst, XMMRegister src); 1217 1218 void emms(); 1219 1220 void fabs(); 1221 1222 void fadd(int i); 1223 1224 void fadd_d(Address src); 1225 void fadd_s(Address src); 1226 1227 // "Alternate" versions of x87 instructions place result down in FPU 1228 // stack instead of on TOS 1229 1230 void fadda(int i); // "alternate" fadd 1231 void faddp(int i = 1); 1232 1233 void fchs(); 1234 1235 void fcom(int i); 1236 1237 void fcomp(int i = 1); 1238 void fcomp_d(Address src); 1239 void fcomp_s(Address src); 1240 1241 void fcompp(); 1242 1243 void fcos(); 1244 1245 void fdecstp(); 1246 1247 void fdiv(int i); 1248 void fdiv_d(Address src); 1249 void fdivr_s(Address src); 1250 void fdiva(int i); // "alternate" fdiv 1251 void fdivp(int i = 1); 1252 1253 void fdivr(int i); 1254 void 
fdivr_d(Address src); 1255 void fdiv_s(Address src); 1256 1257 void fdivra(int i); // "alternate" reversed fdiv 1258 1259 void fdivrp(int i = 1); 1260 1261 void ffree(int i = 0); 1262 1263 void fild_d(Address adr); 1264 void fild_s(Address adr); 1265 1266 void fincstp(); 1267 1268 void finit(); 1269 1270 void fist_s (Address adr); 1271 void fistp_d(Address adr); 1272 void fistp_s(Address adr); 1273 1274 void fld1(); 1275 1276 void fld_d(Address adr); 1277 void fld_s(Address adr); 1278 void fld_s(int index); 1279 void fld_x(Address adr); // extended-precision (80-bit) format 1280 1281 void fldcw(Address src); 1282 1283 void fldenv(Address src); 1284 1285 void fldlg2(); 1286 1287 void fldln2(); 1288 1289 void fldz(); 1290 1291 void flog(); 1292 void flog10(); 1293 1294 void fmul(int i); 1295 1296 void fmul_d(Address src); 1297 void fmul_s(Address src); 1298 1299 void fmula(int i); // "alternate" fmul 1300 1301 void fmulp(int i = 1); 1302 1303 void fnsave(Address dst); 1304 1305 void fnstcw(Address src); 1306 1307 void fnstsw_ax(); 1308 1309 void fprem(); 1310 void fprem1(); 1311 1312 void frstor(Address src); 1313 1314 void fsin(); 1315 1316 void fsqrt(); 1317 1318 void fst_d(Address adr); 1319 void fst_s(Address adr); 1320 1321 void fstp_d(Address adr); 1322 void fstp_d(int index); 1323 void fstp_s(Address adr); 1324 void fstp_x(Address adr); // extended-precision (80-bit) format 1325 1326 void fsub(int i); 1327 void fsub_d(Address src); 1328 void fsub_s(Address src); 1329 1330 void fsuba(int i); // "alternate" fsub 1331 1332 void fsubp(int i = 1); 1333 1334 void fsubr(int i); 1335 void fsubr_d(Address src); 1336 void fsubr_s(Address src); 1337 1338 void fsubra(int i); // "alternate" reversed fsub 1339 1340 void fsubrp(int i = 1); 1341 1342 void ftan(); 1343 1344 void ftst(); 1345 1346 void fucomi(int i = 1); 1347 void fucomip(int i = 1); 1348 1349 void fwait(); 1350 1351 void fxch(int i = 1); 1352 1353 void fxrstor(Address src); 1354 1355 void fxsave(Address dst); 1356 1357 void fyl2x(); 1358 void frndint(); 1359 void f2xm1(); 1360 void fldl2e(); 1361 1362 void hlt(); 1363 1364 void idivl(Register src); 1365 void divl(Register src); // Unsigned division 1366 1367 #ifdef _LP64 1368 void idivq(Register src); 1369 #endif 1370 1371 void imull(Register dst, Register src); 1372 void imull(Register dst, Register src, int value); 1373 void imull(Register dst, Address src); 1374 1375 #ifdef _LP64 1376 void imulq(Register dst, Register src); 1377 void imulq(Register dst, Register src, int value); 1378 void imulq(Register dst, Address src); 1379 #endif 1380 1381 // jcc is the generic conditional branch generator to run- 1382 // time routines, jcc is used for branches to labels. jcc 1383 // takes a branch opcode (cc) and a label (L) and generates 1384 // either a backward branch or a forward branch and links it 1385 // to the label fixup chain. Usage: 1386 // 1387 // Label L; // unbound label 1388 // jcc(cc, L); // forward branch to unbound label 1389 // bind(L); // bind label to the current pc 1390 // jcc(cc, L); // backward branch to bound label 1391 // bind(L); // illegal: a label may be bound only once 1392 // 1393 // Note: The same Label can be used for forward and backward branches 1394 // but it may be bound only once. 1395 1396 void jcc(Condition cc, Label& L, bool maybe_short = true); 1397 1398 // Conditional jump to a 8-bit offset to L. 1399 // WARNING: be very careful using this for forward jumps. 
If the label is 1400 // not bound within an 8-bit offset of this instruction, a run-time error 1401 // will occur. 1402 void jccb(Condition cc, Label& L); 1403 1404 void jmp(Address entry); // pc <- entry 1405 1406 // Label operations & relative jumps (PPUM Appendix D) 1407 void jmp(Label& L, bool maybe_short = true); // unconditional jump to L 1408 1409 void jmp(Register entry); // pc <- entry 1410 1411 // Unconditional 8-bit offset jump to L. 1412 // WARNING: be very careful using this for forward jumps. If the label is 1413 // not bound within an 8-bit offset of this instruction, a run-time error 1414 // will occur. 1415 void jmpb(Label& L); 1416 1417 void ldmxcsr( Address src ); 1418 1419 void leal(Register dst, Address src); 1420 1421 void leaq(Register dst, Address src); 1422 1423 void lfence(); 1424 1425 void lock(); 1426 1427 void lzcntl(Register dst, Register src); 1428 1429 #ifdef _LP64 1430 void lzcntq(Register dst, Register src); 1431 #endif 1432 1433 enum Membar_mask_bits { 1434 StoreStore = 1 << 3, 1435 LoadStore = 1 << 2, 1436 StoreLoad = 1 << 1, 1437 LoadLoad = 1 << 0 1438 }; 1439 1440 // Serializes memory and blows flags 1441 void membar(Membar_mask_bits order_constraint) { 1442 if (os::is_MP()) { 1443 // We only have to handle StoreLoad 1444 if (order_constraint & StoreLoad) { 1445 // All usable chips support "locked" instructions which suffice 1446 // as barriers, and are much faster than the alternative of 1447 // using cpuid instruction. We use here a locked add [esp-C],0. 1448 // This is conveniently otherwise a no-op except for blowing 1449 // flags, and introducing a false dependency on target memory 1450 // location. We can't do anything with flags, but we can avoid 1451 // memory dependencies in the current method by locked-adding 1452 // somewhere else on the stack. Doing [esp+C] will collide with 1453 // something on stack in current method, hence we go for [esp-C]. 1454 // It is convenient since it is almost always in data cache, for 1455 // any small C. We need to step back from SP to avoid data 1456 // dependencies with other things on below SP (callee-saves, for 1457 // example). Without a clear way to figure out the minimal safe 1458 // distance from SP, it makes sense to step back the complete 1459 // cache line, as this will also avoid possible second-order effects 1460 // with locked ops against the cache line. Our choice of offset 1461 // is bounded by x86 operand encoding, which should stay within 1462 // [-128; +127] to have the 8-byte displacement encoding. 1463 // 1464 // Any change to this code may need to revisit other places in 1465 // the code where this idiom is used, in particular the 1466 // orderAccess code. 
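        //
        // As a concrete (illustrative) example: on a part reporting a
        // 64-byte L1 line, the code below emits the equivalent of
        //
        //   lock; addl [rsp - 64], 0
        //
        // i.e. a locked add of zero one cache line below the stack pointer.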
1467 1468 int offset = -VM_Version::L1_line_size(); 1469 if (offset < -128) { 1470 offset = -128; 1471 } 1472 1473 lock(); 1474 addl(Address(rsp, offset), 0);// Assert the lock# signal here 1475 } 1476 } 1477 } 1478 1479 void mfence(); 1480 1481 // Moves 1482 1483 void mov64(Register dst, int64_t imm64); 1484 1485 void movb(Address dst, Register src); 1486 void movb(Address dst, int imm8); 1487 void movb(Register dst, Address src); 1488 1489 void kmovq(KRegister dst, KRegister src); 1490 void kmovql(KRegister dst, Register src); 1491 void kmovdl(KRegister dst, Register src); 1492 void kmovq(Address dst, KRegister src); 1493 void kmovq(KRegister dst, Address src); 1494 1495 void movdl(XMMRegister dst, Register src); 1496 void movdl(Register dst, XMMRegister src); 1497 void movdl(XMMRegister dst, Address src); 1498 void movdl(Address dst, XMMRegister src); 1499 1500 // Move Double Quadword 1501 void movdq(XMMRegister dst, Register src); 1502 void movdq(Register dst, XMMRegister src); 1503 1504 // Move Aligned Double Quadword 1505 void movdqa(XMMRegister dst, XMMRegister src); 1506 void movdqa(XMMRegister dst, Address src); 1507 1508 // Move Unaligned Double Quadword 1509 void movdqu(Address dst, XMMRegister src); 1510 void movdqu(XMMRegister dst, Address src); 1511 void movdqu(XMMRegister dst, XMMRegister src); 1512 1513 // Move Unaligned 256bit Vector 1514 void vmovdqu(Address dst, XMMRegister src); 1515 void vmovdqu(XMMRegister dst, Address src); 1516 void vmovdqu(XMMRegister dst, XMMRegister src); 1517 1518 // Move Unaligned 512bit Vector 1519 void evmovdqu(Address dst, XMMRegister src, int vector_len); 1520 void evmovdqu(XMMRegister dst, Address src, int vector_len); 1521 void evmovdqu(XMMRegister dst, XMMRegister src, int vector_len); 1522 1523 // Move lower 64bit to high 64bit in 128bit register 1524 void movlhps(XMMRegister dst, XMMRegister src); 1525 1526 void movl(Register dst, int32_t imm32); 1527 void movl(Address dst, int32_t imm32); 1528 void movl(Register dst, Register src); 1529 void movl(Register dst, Address src); 1530 void movl(Address dst, Register src); 1531 1532 // These dummies prevent using movl from converting a zero (like NULL) into Register 1533 // by giving the compiler two choices it can't resolve 1534 1535 void movl(Address dst, void* junk); 1536 void movl(Register dst, void* junk); 1537 1538 #ifdef _LP64 1539 void movq(Register dst, Register src); 1540 void movq(Register dst, Address src); 1541 void movq(Address dst, Register src); 1542 #endif 1543 1544 void movq(Address dst, MMXRegister src ); 1545 void movq(MMXRegister dst, Address src ); 1546 1547 #ifdef _LP64 1548 // These dummies prevent using movq from converting a zero (like NULL) into Register 1549 // by giving the compiler two choices it can't resolve 1550 1551 void movq(Address dst, void* dummy); 1552 void movq(Register dst, void* dummy); 1553 #endif 1554 1555 // Move Quadword 1556 void movq(Address dst, XMMRegister src); 1557 void movq(XMMRegister dst, Address src); 1558 1559 void movsbl(Register dst, Address src); 1560 void movsbl(Register dst, Register src); 1561 1562 #ifdef _LP64 1563 void movsbq(Register dst, Address src); 1564 void movsbq(Register dst, Register src); 1565 1566 // Move signed 32bit immediate to 64bit extending sign 1567 void movslq(Address dst, int32_t imm64); 1568 void movslq(Register dst, int32_t imm64); 1569 1570 void movslq(Register dst, Address src); 1571 void movslq(Register dst, Register src); 1572 void movslq(Register dst, void* src); // Dummy declaration to cause NULL to 
be ambiguous 1573 #endif 1574 1575 void movswl(Register dst, Address src); 1576 void movswl(Register dst, Register src); 1577 1578 #ifdef _LP64 1579 void movswq(Register dst, Address src); 1580 void movswq(Register dst, Register src); 1581 #endif 1582 1583 void movw(Address dst, int imm16); 1584 void movw(Register dst, Address src); 1585 void movw(Address dst, Register src); 1586 1587 void movzbl(Register dst, Address src); 1588 void movzbl(Register dst, Register src); 1589 1590 #ifdef _LP64 1591 void movzbq(Register dst, Address src); 1592 void movzbq(Register dst, Register src); 1593 #endif 1594 1595 void movzwl(Register dst, Address src); 1596 void movzwl(Register dst, Register src); 1597 1598 #ifdef _LP64 1599 void movzwq(Register dst, Address src); 1600 void movzwq(Register dst, Register src); 1601 #endif 1602 1603 // Unsigned multiply with RAX destination register 1604 void mull(Address src); 1605 void mull(Register src); 1606 1607 #ifdef _LP64 1608 void mulq(Address src); 1609 void mulq(Register src); 1610 void mulxq(Register dst1, Register dst2, Register src); 1611 #endif 1612 1613 // Multiply Scalar Double-Precision Floating-Point Values 1614 void mulsd(XMMRegister dst, Address src); 1615 void mulsd(XMMRegister dst, XMMRegister src); 1616 1617 // Multiply Scalar Single-Precision Floating-Point Values 1618 void mulss(XMMRegister dst, Address src); 1619 void mulss(XMMRegister dst, XMMRegister src); 1620 1621 void negl(Register dst); 1622 1623 #ifdef _LP64 1624 void negq(Register dst); 1625 #endif 1626 1627 void nop(int i = 1); 1628 1629 void notl(Register dst); 1630 1631 #ifdef _LP64 1632 void notq(Register dst); 1633 #endif 1634 1635 void orl(Address dst, int32_t imm32); 1636 void orl(Register dst, int32_t imm32); 1637 void orl(Register dst, Address src); 1638 void orl(Register dst, Register src); 1639 void orl(Address dst, Register src); 1640 1641 void orq(Address dst, int32_t imm32); 1642 void orq(Register dst, int32_t imm32); 1643 void orq(Register dst, Address src); 1644 void orq(Register dst, Register src); 1645 1646 // Pack with unsigned saturation 1647 void packuswb(XMMRegister dst, XMMRegister src); 1648 void packuswb(XMMRegister dst, Address src); 1649 void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1650 1651 // Pemutation of 64bit words 1652 void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len); 1653 1654 void pause(); 1655 1656 // SSE4.2 string instructions 1657 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); 1658 void pcmpestri(XMMRegister xmm1, Address src, int imm8); 1659 1660 // SSE 4.1 extract 1661 void pextrd(Register dst, XMMRegister src, int imm8); 1662 void pextrq(Register dst, XMMRegister src, int imm8); 1663 1664 // SSE 4.1 insert 1665 void pinsrd(XMMRegister dst, Register src, int imm8); 1666 void pinsrq(XMMRegister dst, Register src, int imm8); 1667 1668 // SSE4.1 packed move 1669 void pmovzxbw(XMMRegister dst, XMMRegister src); 1670 void pmovzxbw(XMMRegister dst, Address src); 1671 1672 #ifndef _LP64 // no 32bit push/pop on amd64 1673 void popl(Address dst); 1674 #endif 1675 1676 #ifdef _LP64 1677 void popq(Address dst); 1678 #endif 1679 1680 void popcntl(Register dst, Address src); 1681 void popcntl(Register dst, Register src); 1682 1683 #ifdef _LP64 1684 void popcntq(Register dst, Address src); 1685 void popcntq(Register dst, Register src); 1686 #endif 1687 1688 // Prefetches (SSE, SSE2, 3DNOW only) 1689 1690 void prefetchnta(Address src); 1691 void prefetchr(Address src); 1692 void 
  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  void nop(int i = 1);

  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Permutation of 64bit words
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);

  void pause();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  // SSE 4.1 extract
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);

  // SSE 4.1 insert
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);

  // SSE4.1 packed move
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif

#ifdef _LP64
  void popq(Address dst);
#endif

  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);

#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif

  // Prefetches (SSE, SSE2, 3DNOW only)

  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);

  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);

  // Shuffle Packed Low Words
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode);

  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);
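
  // Note: these shift the full 128-bit register by whole bytes, not bits.
  // For example (illustrative):
  //
  //   psrldq(xmm0, 8);   // moves the upper quadword of xmm0 into the lower
  //                      // quadword and zeroes the upper half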
  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);

  // Interleave Low Quadwords
  void punpcklqdq(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif

  void pushq(Address src);

  void rcll(Register dst, int imm8);

  void rclq(Register dst, int imm8);

  void rcrq(Register dst, int imm8);

  void rdtsc();

  void ret(int imm16);

#ifdef _LP64
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
#endif

  void sahf();

  void sarl(Register dst, int imm8);
  void sarl(Register dst);

  void sarq(Register dst, int imm8);
  void sarq(Register dst);

  void sbbl(Address dst, int32_t imm32);
  void sbbl(Register dst, int32_t imm32);
  void sbbl(Register dst, Address src);
  void sbbl(Register dst, Register src);

  void sbbq(Address dst, int32_t imm32);
  void sbbq(Register dst, int32_t imm32);
  void sbbq(Register dst, Address src);
  void sbbq(Register dst, Register src);

  void setb(Condition cc, Register dst);

  void shldl(Register dst, Register src);
  void shldl(Register dst, Register src, int8_t imm8);

  void shll(Register dst, int imm8);
  void shll(Register dst);

  void shlq(Register dst, int imm8);
  void shlq(Register dst);

  void shrdl(Register dst, Register src);

  void shrl(Register dst, int imm8);
  void shrl(Register dst);

  void shrq(Register dst, int imm8);
  void shrq(Register dst);

  void smovl(); // QQQ generic?

  // Compute Square Root of Scalar Double-Precision Floating-Point Value
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);

  void std();

  void stmxcsr(Address dst);

  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);

  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);

  // Force generation of a 4 byte immediate value even if it fits into 8 bits
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);

  // Subtract Scalar Double-Precision Floating-Point Values
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);

  // Subtract Scalar Single-Precision Floating-Point Values
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);

  void testb(Register dst, int imm8);

  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);

  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);

  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);

  // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);

  void xabort(int8_t imm8);

  void xaddl(Address dst, Register src);

  void xaddq(Address dst, Register src);

  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);

  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);

  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);

  void xend();

  // Get Value of Extended Control Register
  void xgetbv();

  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);

  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);

  void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0

  // AVX 3-operand scalar instructions (encoded with VEX prefix)

  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
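
  // Unlike the two-operand SSE forms above (e.g. addsd: dst = dst + src), these
  // VEX-encoded variants are non-destructive three-operand forms. For example
  // (illustrative):
  //
  //   vaddsd(xmm0, xmm1, xmm2);   // xmm0 = xmm1 + xmm2; xmm1 and xmm2 unchanged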

  //====================VECTOR ARITHMETIC=====================================

  // Add Packed Floating-Point Values
  void addpd(XMMRegister dst, XMMRegister src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Subtract Packed Floating-Point Values
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply Packed Floating-Point Values
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Divide Packed Floating-Point Values
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Bitwise Logical XOR of Packed Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
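
  // Note (assumed convention): the vector_len argument on the VEX/EVEX-encoded
  // forms selects the operand width: 0 for 128-bit (XMM), 1 for 256-bit (YMM),
  // 2 for 512-bit (ZMM), matching the AVX/AVX-512 vector length encoding.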

  // Add horizontal packed integers
  void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void phaddw(XMMRegister dst, XMMRegister src);
  void phaddd(XMMRegister dst, XMMRegister src);

  // Add packed integers
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sub packed integers
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply packed integers (shorts and ints; vpmullq handles longs)
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Shift left packed integers
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
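
  // Note: the int variants shift every element by an immediate count, while the
  // XMMRegister variants take the count from the low 64 bits of the shift
  // register. For example (illustrative):
  //
  //   psllw(xmm1, 3);      // each 16-bit lane of xmm1 shifted left by 3
  //   psllw(xmm1, xmm2);   // shift count taken from the low quadword of xmm2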

  // Logical shift right packed integers
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // And packed integers
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Or packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Xor packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Copy low 128bit into high 128bit of YMM registers.
  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vextractf128h(XMMRegister dst, XMMRegister src);
  void vextracti128h(XMMRegister dst, XMMRegister src);

  // Load/store high 128bit of YMM registers which does not destroy other half.
  void vinsertf128h(XMMRegister dst, Address src);
  void vinserti128h(XMMRegister dst, Address src);
  void vextractf128h(Address dst, XMMRegister src);
  void vextracti128h(Address dst, XMMRegister src);

  // Copy low 256bit into high 256bit of ZMM registers.
  void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vextracti64x4h(XMMRegister dst, XMMRegister src);
  void vextractf64x4h(XMMRegister dst, XMMRegister src);
  void vextractf64x4h(Address dst, XMMRegister src);
  void vinsertf64x4h(XMMRegister dst, Address src);

  // Copy targeted 128bit segments of the ZMM registers
  void vextracti64x2h(XMMRegister dst, XMMRegister src, int value);
  void vextractf64x2h(XMMRegister dst, XMMRegister src, int value);
  void vextractf32x4h(XMMRegister dst, XMMRegister src, int value);

  // duplicate 4-byte integer data from src into 8 locations in dest
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // duplicate n-byte integer data from src across the destination vector (width selected by vector_len)
  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);

  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);

  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Register src, int vector_len);

  // Carry-Less Multiplication Quadword
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);

  // AVX instruction which is used to clear upper 128 bits of YMM registers and
  // to avoid the transition penalty between AVX and SSE states. There is no
  // penalty if legacy SSE instructions are encoded using VEX prefix because
  // they always clear upper 128 bits. It should be used before calling
  // runtime code and native libraries.
  void vzeroupper();
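
  // Illustrative usage (hypothetical code sequence, not part of this header):
  //
  //   // ... 256-bit AVX loop that left the upper halves of the YMM registers dirty ...
  //   vzeroupper();   // clear the upper 128 bits of all YMM registers
  //   // ... now safe to call runtime code or a native library built with legacy SSE ...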

 protected:
  // The next instructions require the memory operand to be 16-byte aligned in SSE mode.
  // They should be called only from the corresponding MacroAssembler instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);

};

#endif // CPU_X86_VM_ASSEMBLER_X86_HPP