1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
  26 #define CPU_X86_VM_ASSEMBLER_X86_HPP
  27 
  28 #include "asm/register.hpp"
  29 #include "vm_version_x86.hpp"
  30 
  31 class BiasedLockingCounters;
  32 
  33 // Contains all the definitions needed for x86 assembly code generation.
  34 
  35 // Calling convention
  36 class Argument {
  37  public:
  38   enum {
  39 #ifdef _LP64
  40 #ifdef _WIN64
  41     n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
  42     n_float_register_parameters_c = 4,  // xmm0 - xmm3 (c_farg0, c_farg1, ... )
  43 #else
  44     n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
  45     n_float_register_parameters_c = 8,  // xmm0 - xmm7 (c_farg0, c_farg1, ... )
  46 #endif // _WIN64
  47     n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
  48     n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
  49 #else
  50     n_register_parameters = 0   // 0 registers used to pass arguments
  51 #endif // _LP64
  52   };
  53 };
  54 
  55 
  56 #ifdef _LP64
  57 // Symbolically name the register arguments used by the c calling convention.
  58 // Windows is different from linux/solaris. So much for standards...
  59 
  60 #ifdef _WIN64
  61 
  62 REGISTER_DECLARATION(Register, c_rarg0, rcx);
  63 REGISTER_DECLARATION(Register, c_rarg1, rdx);
  64 REGISTER_DECLARATION(Register, c_rarg2, r8);
  65 REGISTER_DECLARATION(Register, c_rarg3, r9);
  66 
  67 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
  68 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
  69 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
  70 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
  71 
  72 #else
  73 
  74 REGISTER_DECLARATION(Register, c_rarg0, rdi);
  75 REGISTER_DECLARATION(Register, c_rarg1, rsi);
  76 REGISTER_DECLARATION(Register, c_rarg2, rdx);
  77 REGISTER_DECLARATION(Register, c_rarg3, rcx);
  78 REGISTER_DECLARATION(Register, c_rarg4, r8);
  79 REGISTER_DECLARATION(Register, c_rarg5, r9);
  80 
  81 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
  82 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
  83 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
  84 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
  85 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
  86 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
  87 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
  88 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
  89 
  90 #endif // _WIN64
  91 
  92 // Symbolically name the register arguments used by the Java calling convention.
  93 // We have control over the convention for java so we can do what we please.
  94 // What pleases us is to offset the java calling convention so that when
  95 // we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and takes a
// small number of arguments (two fewer register arguments on windows); see the
// illustration after the table below.
  98 //
  99 //        |-------------------------------------------------------|
 100 //        | c_rarg0   c_rarg1  c_rarg2 c_rarg3 c_rarg4 c_rarg5    |
 101 //        |-------------------------------------------------------|
 102 //        | rcx       rdx      r8      r9      rdi*    rsi*       | windows (* not a c_rarg)
 103 //        | rdi       rsi      rdx     rcx     r8      r9         | solaris/linux
 104 //        |-------------------------------------------------------|
 105 //        | j_rarg5   j_rarg0  j_rarg1 j_rarg2 j_rarg3 j_rarg4    |
 106 //        |-------------------------------------------------------|
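//
// Illustrative example (a sketch, not a full description of the JNI wrapper): for a
// non-static native call the C convention wants
//   c_rarg0 = JNIEnv*,  c_rarg1 = receiver,  c_rarg2 = first Java argument, ...
// Since j_rarg0 == c_rarg1, j_rarg1 == c_rarg2, and so on, the receiver and the leading
// Java arguments already sit in the right C registers; only JNIEnv* (and, on windows,
// the arguments that no longer fit in registers) has to be placed.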
 107 
 108 REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
 109 REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
 110 REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
 111 // Windows runs out of register args here
 112 #ifdef _WIN64
 113 REGISTER_DECLARATION(Register, j_rarg3, rdi);
 114 REGISTER_DECLARATION(Register, j_rarg4, rsi);
 115 #else
 116 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
 117 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
 118 #endif /* _WIN64 */
 119 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
 120 
 121 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
 122 REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
 123 REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
 124 REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
 125 REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
 126 REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
 127 REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
 128 REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
 129 
 130 REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
 131 REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile
 132 
 133 REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
 134 REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
 135 
 136 #else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that if the dead code is incorrectly live and executed it
// will cause an assertion failure.
 140 #define rscratch1 noreg
 141 #define rscratch2 noreg
 142 
 143 #endif // _LP64
 144 
 145 // JSR 292
 146 // On x86, the SP does not have to be saved when invoking method handle intrinsics
 147 // or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
 148 REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
 149 
 150 // Address is an abstraction used to represent a memory location
 151 // using any of the amd64 addressing modes with one object.
 152 //
 153 // Note: A register location is represented via a Register, not
 154 //       via an address for efficiency & simplicity reasons.
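//
// A few illustrative Address objects (assuming the usual register names):
//   Address(rbp, -8)                         // [rbp - 8]
//   Address(rbx, rcx, Address::times_4)      // [rbx + rcx*4]
//   Address(rsi, rdi, Address::times_8, 16)  // [rsi + rdi*8 + 16]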
 155 
 156 class ArrayAddress;
 157 
 158 class Address {
 159  public:
 160   enum ScaleFactor {
 161     no_scale = -1,
 162     times_1  =  0,
 163     times_2  =  1,
 164     times_4  =  2,
 165     times_8  =  3,
 166     times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
 167   };
 168   static ScaleFactor times(int size) {
 169     assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
 170     if (size == 8)  return times_8;
 171     if (size == 4)  return times_4;
 172     if (size == 2)  return times_2;
 173     return times_1;
 174   }
 175   static int scale_size(ScaleFactor scale) {
 176     assert(scale != no_scale, "");
 177     assert(((1 << (int)times_1) == 1 &&
 178             (1 << (int)times_2) == 2 &&
 179             (1 << (int)times_4) == 4 &&
 180             (1 << (int)times_8) == 8), "");
 181     return (1 << (int)scale);
 182   }
 183 
 184  private:
 185   Register         _base;
 186   Register         _index;
 187   ScaleFactor      _scale;
 188   int              _disp;
 189   RelocationHolder _rspec;
 190 
 191   // Easily misused constructors make them private
 192   // %%% can we make these go away?
 193   NOT_LP64(Address(address loc, RelocationHolder spec);)
 194   Address(int disp, address loc, relocInfo::relocType rtype);
 195   Address(int disp, address loc, RelocationHolder spec);
 196 
 197  public:
 198 
  int disp() { return _disp; }
 200   // creation
 201   Address()
 202     : _base(noreg),
 203       _index(noreg),
 204       _scale(no_scale),
 205       _disp(0) {
 206   }
 207 
  // No default displacement, otherwise Register can be implicitly
  // converted to 0(Register), which is quite a different animal.
 210 
 211   Address(Register base, int disp)
 212     : _base(base),
 213       _index(noreg),
 214       _scale(no_scale),
 215       _disp(disp) {
 216   }
 217 
 218   Address(Register base, Register index, ScaleFactor scale, int disp = 0)
 219     : _base (base),
 220       _index(index),
 221       _scale(scale),
 222       _disp (disp) {
 223     assert(!index->is_valid() == (scale == Address::no_scale),
 224            "inconsistent address");
 225   }
 226 
 227   Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
 228     : _base (base),
 229       _index(index.register_or_noreg()),
 230       _scale(scale),
 231       _disp (disp + (index.constant_or_zero() * scale_size(scale))) {
 232     if (!index.is_register())  scale = Address::no_scale;
 233     assert(!_index->is_valid() == (scale == Address::no_scale),
 234            "inconsistent address");
 235   }
 236 
 237   Address plus_disp(int disp) const {
 238     Address a = (*this);
 239     a._disp += disp;
 240     return a;
 241   }
 242   Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
 243     Address a = (*this);
 244     a._disp += disp.constant_or_zero() * scale_size(scale);
 245     if (disp.is_register()) {
 246       assert(!a.index()->is_valid(), "competing indexes");
 247       a._index = disp.as_register();
 248       a._scale = scale;
 249     }
 250     return a;
 251   }
 252   bool is_same_address(Address a) const {
 253     // disregard _rspec
 254     return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
 255   }
 256 
 257   // The following two overloads are used in connection with the
 258   // ByteSize type (see sizes.hpp).  They simplify the use of
 259   // ByteSize'd arguments in assembly code. Note that their equivalent
 260   // for the optimized build are the member functions with int disp
 261   // argument since ByteSize is mapped to an int type in that case.
 262   //
 263   // Note: DO NOT introduce similar overloaded functions for WordSize
 264   // arguments as in the optimized mode, both ByteSize and WordSize
 265   // are mapped to the same type and thus the compiler cannot make a
 266   // distinction anymore (=> compiler errors).
 267 
 268 #ifdef ASSERT
 269   Address(Register base, ByteSize disp)
 270     : _base(base),
 271       _index(noreg),
 272       _scale(no_scale),
 273       _disp(in_bytes(disp)) {
 274   }
 275 
 276   Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
 277     : _base(base),
 278       _index(index),
 279       _scale(scale),
 280       _disp(in_bytes(disp)) {
 281     assert(!index->is_valid() == (scale == Address::no_scale),
 282            "inconsistent address");
 283   }
 284 
 285   Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
 286     : _base (base),
 287       _index(index.register_or_noreg()),
 288       _scale(scale),
 289       _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
 290     if (!index.is_register())  scale = Address::no_scale;
 291     assert(!_index->is_valid() == (scale == Address::no_scale),
 292            "inconsistent address");
 293   }
 294 
 295 #endif // ASSERT
 296 
 297   // accessors
 298   bool        uses(Register reg) const { return _base == reg || _index == reg; }
 299   Register    base()             const { return _base;  }
 300   Register    index()            const { return _index; }
 301   ScaleFactor scale()            const { return _scale; }
 302   int         disp()             const { return _disp;  }
 303 
 304   // Convert the raw encoding form into the form expected by the constructor for
 305   // Address.  An index of 4 (rsp) corresponds to having no index, so convert
 306   // that to noreg for the Address constructor.
 307   static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
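  // For example (illustrative only): make_raw(rbp->encoding(), 4 /* rsp */, 0, 8, relocInfo::none)
  // describes [rbp + 8]; the index of 4 is turned into noreg rather than rsp.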
 308 
 309   static Address make_array(ArrayAddress);
 310 
 311  private:
 312   bool base_needs_rex() const {
 313     return _base != noreg && _base->encoding() >= 8;
 314   }
 315 
 316   bool index_needs_rex() const {
    return _index != noreg && _index->encoding() >= 8;
 318   }
 319 
 320   relocInfo::relocType reloc() const { return _rspec.type(); }
 321 
 322   friend class Assembler;
 323   friend class MacroAssembler;
 324   friend class LIR_Assembler; // base/index/scale/disp
 325 };
 326 
 327 //
 328 // AddressLiteral has been split out from Address because operands of this type
 329 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
 330 // the few instructions that need to deal with address literals are unique and the
 331 // MacroAssembler does not have to implement every instruction in the Assembler
 332 // in order to search for address literals that may need special handling depending
// on the instruction and the platform. As a small step on the way to merging i486/amd64
 334 // directories.
 335 //
 336 class AddressLiteral {
 337   friend class ArrayAddress;
 338   RelocationHolder _rspec;
  // Typically when we use AddressLiterals we want to use their rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
 342   bool _is_lval;
 343 
 344   // If the target is far we'll need to load the ea of this to
 345   // a register to reach it. Otherwise if near we can do rip
 346   // relative addressing.
 347 
 348   address          _target;
 349 
 350  protected:
 351   // creation
 352   AddressLiteral()
 353     : _is_lval(false),
 354       _target(NULL)
 355   {}
 356 
 357   public:
 358 
 359 
 360   AddressLiteral(address target, relocInfo::relocType rtype);
 361 
 362   AddressLiteral(address target, RelocationHolder const& rspec)
 363     : _rspec(rspec),
 364       _is_lval(false),
 365       _target(target)
 366   {}
 367 
 368   AddressLiteral addr() {
 369     AddressLiteral ret = *this;
 370     ret._is_lval = true;
 371     return ret;
 372   }
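  // Illustrative use (MacroAssembler-level; p is a placeholder address):
  //   movptr(rax, ExternalAddress(p));   // consumes the rval, i.e. the value stored at p
  // whereas the lval form produced by addr() stands for the address p itself, e.g. when p
  // must first be materialized in a register because it is out of rip-relative range.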
 373 
 374 
 375  private:
 376 
 377   address target() { return _target; }
 378   bool is_lval() { return _is_lval; }
 379 
 380   relocInfo::relocType reloc() const { return _rspec.type(); }
 381   const RelocationHolder& rspec() const { return _rspec; }
 382 
 383   friend class Assembler;
 384   friend class MacroAssembler;
 385   friend class Address;
 386   friend class LIR_Assembler;
 387 };
 388 
// Convenience classes
 390 class RuntimeAddress: public AddressLiteral {
 391 
 392   public:
 393 
 394   RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
 395 
 396 };
 397 
 398 class ExternalAddress: public AddressLiteral {
 399  private:
 400   static relocInfo::relocType reloc_for_target(address target) {
 401     // Sometimes ExternalAddress is used for values which aren't
 402     // exactly addresses, like the card table base.
 403     // external_word_type can't be used for values in the first page
 404     // so just skip the reloc in that case.
 405     return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
 406   }
 407 
 408  public:
 409 
 410   ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
 411 
 412 };
 413 
 414 class InternalAddress: public AddressLiteral {
 415 
 416   public:
 417 
 418   InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
 419 
 420 };
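// Illustrative uses (in MacroAssembler-level code; the names are placeholders):
//   call(RuntimeAddress(some_runtime_entry));
//   lea(rscratch1, ExternalAddress(some_vm_global));
// The choice of class only affects which relocInfo::relocType gets recorded.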
 421 
// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result.
 425 
 426 class ArrayAddress {
 427   private:
 428 
 429   AddressLiteral _base;
 430   Address        _index;
 431 
 432   public:
 433 
 434   ArrayAddress() {};
 435   ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
 436   AddressLiteral base() { return _base; }
 437   Address index() { return _index; }
 438 
 439 };
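// Illustrative: ArrayAddress(ExternalAddress(table_base), Address(noreg, rbx, Address::times_8))
// names table_base[rbx]. On 32bit this folds into a single addressing mode (absolute disp plus
// scaled index); on amd64 the base must first be loaded into a register. (table_base is a
// placeholder for some address-typed value.)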
 440 
 441 class InstructionAttr;
 442 
// 64-bit reflects the fxsave size, which is 512 bytes, plus the new xsave area on EVEX, which is another 2176 bytes.
 444 // See fxsave and xsave(EVEX enabled) documentation for layout
 445 const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);
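// (That is 512 + 2176 = 2688 bytes on LP64; the 32bit value of 27 words corresponds to
// the 108-byte legacy FNSAVE image.)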
 446 
// The Intel x86/AMD64 Assembler: a pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler generates code into a CodeBuffer.
 450 
 451 class Assembler : public AbstractAssembler  {
 452   friend class AbstractAssembler; // for the non-virtual hack
 453   friend class LIR_Assembler; // as_Address()
 454   friend class StubGenerator;
 455 
 456  public:
 457   enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
 458     zero          = 0x4,
 459     notZero       = 0x5,
 460     equal         = 0x4,
 461     notEqual      = 0x5,
 462     less          = 0xc,
 463     lessEqual     = 0xe,
 464     greater       = 0xf,
 465     greaterEqual  = 0xd,
 466     below         = 0x2,
 467     belowEqual    = 0x6,
 468     above         = 0x7,
 469     aboveEqual    = 0x3,
 470     overflow      = 0x0,
 471     noOverflow    = 0x1,
 472     carrySet      = 0x2,
 473     carryClear    = 0x3,
 474     negative      = 0x8,
 475     positive      = 0x9,
 476     parity        = 0xa,
 477     noParity      = 0xb
 478   };
 479 
 480   enum Prefix {
 481     // segment overrides
 482     CS_segment = 0x2e,
 483     SS_segment = 0x36,
 484     DS_segment = 0x3e,
 485     ES_segment = 0x26,
 486     FS_segment = 0x64,
 487     GS_segment = 0x65,
 488 
 489     REX        = 0x40,
 490 
 491     REX_B      = 0x41,
 492     REX_X      = 0x42,
 493     REX_XB     = 0x43,
 494     REX_R      = 0x44,
 495     REX_RB     = 0x45,
 496     REX_RX     = 0x46,
 497     REX_RXB    = 0x47,
 498 
 499     REX_W      = 0x48,
 500 
 501     REX_WB     = 0x49,
 502     REX_WX     = 0x4A,
 503     REX_WXB    = 0x4B,
 504     REX_WR     = 0x4C,
 505     REX_WRB    = 0x4D,
 506     REX_WRX    = 0x4E,
 507     REX_WRXB   = 0x4F,
 508 
 509     VEX_3bytes = 0xC4,
 510     VEX_2bytes = 0xC5,
 511     EVEX_4bytes = 0x62,
 512     Prefix_EMPTY = 0x0
 513   };
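  // For reference: a REX prefix has the form 0100WRXB, so the REX_* values above are
  // just 0x40 plus the desired combination of W (0x8), R (0x4), X (0x2) and B (0x1),
  // e.g. REX_WRXB == 0x4F sets all four bits.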
 514 
 515   enum VexPrefix {
 516     VEX_B = 0x20,
 517     VEX_X = 0x40,
 518     VEX_R = 0x80,
 519     VEX_W = 0x80
 520   };
 521 
 522   enum ExexPrefix {
 523     EVEX_F  = 0x04,
 524     EVEX_V  = 0x08,
 525     EVEX_Rb = 0x10,
 526     EVEX_X  = 0x40,
 527     EVEX_Z  = 0x80
 528   };
 529 
 530   enum VexSimdPrefix {
 531     VEX_SIMD_NONE = 0x0,
 532     VEX_SIMD_66   = 0x1,
 533     VEX_SIMD_F3   = 0x2,
 534     VEX_SIMD_F2   = 0x3
 535   };
 536 
 537   enum VexOpcode {
 538     VEX_OPCODE_NONE  = 0x0,
 539     VEX_OPCODE_0F    = 0x1,
 540     VEX_OPCODE_0F_38 = 0x2,
 541     VEX_OPCODE_0F_3A = 0x3,
 542     VEX_OPCODE_MASK  = 0x1F
 543   };
 544 
 545   enum AvxVectorLen {
 546     AVX_128bit = 0x0,
 547     AVX_256bit = 0x1,
 548     AVX_512bit = 0x2,
 549     AVX_NoVec  = 0x4
 550   };
 551 
 552   enum EvexTupleType {
 553     EVEX_FV   = 0,
 554     EVEX_HV   = 4,
 555     EVEX_FVM  = 6,
 556     EVEX_T1S  = 7,
 557     EVEX_T1F  = 11,
 558     EVEX_T2   = 13,
 559     EVEX_T4   = 15,
 560     EVEX_T8   = 17,
 561     EVEX_HVM  = 18,
 562     EVEX_QVM  = 19,
 563     EVEX_OVM  = 20,
 564     EVEX_M128 = 21,
 565     EVEX_DUP  = 22,
 566     EVEX_ETUP = 23
 567   };
 568 
 569   enum EvexInputSizeInBits {
 570     EVEX_8bit  = 0,
 571     EVEX_16bit = 1,
 572     EVEX_32bit = 2,
 573     EVEX_64bit = 3,
 574     EVEX_NObit = 4
 575   };
 576 
 577   enum WhichOperand {
 578     // input to locate_operand, and format code for relocations
 579     imm_operand  = 0,            // embedded 32-bit|64-bit immediate operand
 580     disp32_operand = 1,          // embedded 32-bit displacement or address
 581     call32_operand = 2,          // embedded 32-bit self-relative displacement
 582 #ifndef _LP64
 583     _WhichOperand_limit = 3
 584 #else
    narrow_oop_operand = 3,      // embedded 32-bit immediate narrow oop
 586     _WhichOperand_limit = 4
 587 #endif
 588   };
 589 
 590   // Comparison predicates for integral types & FP types when using SSE
 591   enum ComparisonPredicate {
 592     eq = 0,
 593     lt = 1,
 594     le = 2,
 595     _false = 3,
 596     neq = 4,
 597     nlt = 5,
 598     nle = 6,
 599     _true = 7
 600   };
 601 
 602   // Comparison predicates for FP types when using AVX
 603   // O means ordered. U is unordered. When using ordered, any NaN comparison is false. Otherwise, it is true.
  // S means signaling. Q means non-signaling. When signaling is true, the instruction signals #IA on NaN.
 605   enum ComparisonPredicateFP {
 606     EQ_OQ = 0,
 607     LT_OS = 1,
 608     LE_OS = 2,
 609     UNORD_Q = 3,
 610     NEQ_UQ = 4,
 611     NLT_US = 5,
 612     NLE_US = 6,
 613     ORD_Q = 7,
 614     EQ_UQ = 8,
 615     NGE_US = 9,
 616     NGT_US = 0xA,
    FALSE_OQ = 0xB,
 618     NEQ_OQ = 0xC,
 619     GE_OS = 0xD,
 620     GT_OS = 0xE,
 621     TRUE_UQ = 0xF,
 622     EQ_OS = 0x10,
 623     LT_OQ = 0x11,
 624     LE_OQ = 0x12,
 625     UNORD_S = 0x13,
 626     NEQ_US = 0x14,
 627     NLT_UQ = 0x15,
 628     NLE_UQ = 0x16,
 629     ORD_S = 0x17,
 630     EQ_US = 0x18,
 631     NGE_UQ = 0x19,
 632     NGT_UQ = 0x1A,
 633     FALSE_OS = 0x1B,
 634     NEQ_OS = 0x1C,
 635     GE_OQ = 0x1D,
 636     GT_OQ = 0x1E,
    TRUE_US = 0x1F
 638   };
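  // Example: with a NaN operand EQ_OQ (ordered) compares false while EQ_UQ (unordered)
  // compares true; the _S (signaling) predicates also raise #IA for quiet NaNs, the
  // _Q predicates only for signaling NaNs.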
 639 
 640 
  // NOTE: The general philosophy of the declarations here is that 64bit versions
  // of instructions are freely declared without the need for wrapping them in an ifdef.
  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
 644   // In the .cpp file the implementations are wrapped so that they are dropped out
 645   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
 646   // to the size it was prior to merging up the 32bit and 64bit assemblers.
 647   //
 648   // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
 649   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
 650 
 651 private:
 652 
 653   bool _legacy_mode_bw;
 654   bool _legacy_mode_dq;
 655   bool _legacy_mode_vl;
 656   bool _legacy_mode_vlbw;
 657   bool _is_managed;
 658   bool _vector_masking;    // For stub code use only
 659 
 660   class InstructionAttr *_attributes;
 661 
 662   // 64bit prefixes
 663   int prefix_and_encode(int reg_enc, bool byteinst = false);
 664   int prefixq_and_encode(int reg_enc);
 665 
 666   int prefix_and_encode(int dst_enc, int src_enc) {
 667     return prefix_and_encode(dst_enc, false, src_enc, false);
 668   }
 669   int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
 670   int prefixq_and_encode(int dst_enc, int src_enc);
 671 
 672   void prefix(Register reg);
 673   void prefix(Register dst, Register src, Prefix p);
 674   void prefix(Register dst, Address adr, Prefix p);
 675   void prefix(Address adr);
 676   void prefixq(Address adr);
 677 
 678   void prefix(Address adr, Register reg,  bool byteinst = false);
 679   void prefix(Address adr, XMMRegister reg);
 680   void prefixq(Address adr, Register reg);
 681   void prefixq(Address adr, XMMRegister reg);
 682 
 683   void prefetch_prefix(Address src);
 684 
 685   void rex_prefix(Address adr, XMMRegister xreg,
 686                   VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 687   int  rex_prefix_and_encode(int dst_enc, int src_enc,
 688                              VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 689 
 690   void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);
 691 
 692   void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
 693                    int nds_enc, VexSimdPrefix pre, VexOpcode opc);
 694 
 695   void vex_prefix(Address adr, int nds_enc, int xreg_enc,
 696                   VexSimdPrefix pre, VexOpcode opc,
 697                   InstructionAttr *attributes);
 698 
 699   int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
 700                              VexSimdPrefix pre, VexOpcode opc,
 701                              InstructionAttr *attributes);
 702 
 703   void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
 704                    VexOpcode opc, InstructionAttr *attributes);
 705 
 706   int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
 707                              VexOpcode opc, InstructionAttr *attributes);
 708 
 709   // Helper functions for groups of instructions
 710   void emit_arith_b(int op1, int op2, Register dst, int imm8);
 711 
 712   void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  // Force generation of a 4 byte immediate value even if it fits into 8 bits
 714   void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
 715   void emit_arith(int op1, int op2, Register dst, Register src);
 716 
 717   bool emit_compressed_disp_byte(int &disp);
 718 
 719   void emit_operand(Register reg,
 720                     Register base, Register index, Address::ScaleFactor scale,
 721                     int disp,
 722                     RelocationHolder const& rspec,
 723                     int rip_relative_correction = 0);
 724 
 725   void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
 726 
 727   // operands that only take the original 32bit registers
 728   void emit_operand32(Register reg, Address adr);
 729 
 730   void emit_operand(XMMRegister reg,
 731                     Register base, Register index, Address::ScaleFactor scale,
 732                     int disp,
 733                     RelocationHolder const& rspec);
 734 
 735   void emit_operand(XMMRegister reg, Address adr);
 736 
 737   void emit_operand(MMXRegister reg, Address adr);
 738 
 739   // workaround gcc (3.2.1-7) bug
 740   void emit_operand(Address adr, MMXRegister reg);
 741 
 742 
 743   // Immediate-to-memory forms
 744   void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
 745 
 746   void emit_farith(int b1, int b2, int i);
 747 
 748 
 749  protected:
 750   #ifdef ASSERT
 751   void check_relocation(RelocationHolder const& rspec, int format);
 752   #endif
 753 
 754   void emit_data(jint data, relocInfo::relocType    rtype, int format);
 755   void emit_data(jint data, RelocationHolder const& rspec, int format);
 756   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
 757   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
 758 
 759   bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
 760 
 761   // These are all easily abused and hence protected
 762 
 763   // 32BIT ONLY SECTION
 764 #ifndef _LP64
 765   // Make these disappear in 64bit mode since they would never be correct
 766   void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
 767   void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
 768 
 769   void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
 770   void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY
 771 
 772   void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
 773 #else
 774   // 64BIT ONLY SECTION
 775   void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY
 776 
 777   void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
 778   void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
 779 
 780   void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
 781   void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
 782 #endif // _LP64
 783 
  // These are unique in that we are ensured by the caller that the 32bit
  // relative displacement in these instructions will always be able to reach the potentially
 786   // 64bit address described by entry. Since they can take a 64bit address they
 787   // don't have the 32 suffix like the other instructions in this class.
 788 
 789   void call_literal(address entry, RelocationHolder const& rspec);
 790   void jmp_literal(address entry, RelocationHolder const& rspec);
 791 
 792   // Avoid using directly section
 793   // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
 796   // We protect them because we don't trust you...
 797 
 798   // Don't use next inc() and dec() methods directly. INC & DEC instructions
  // could cause a partial flag stall since they don't set the CF flag.
 800   // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
 801   // which call inc() & dec() or add() & sub() in accordance with
 802   // the product flag UseIncDec value.
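  // A minimal sketch of the intended usage (MacroAssembler level):
  //   increment(rcx);   // emits incl/incq when UseIncDec, addl/addq(rcx, 1) otherwise
  //   decrement(rcx);   // likewise decl/decq vs. subl/subq(rcx, 1)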
 803 
 804   void decl(Register dst);
 805   void decl(Address dst);
 806   void decq(Register dst);
 807   void decq(Address dst);
 808 
 809   void incl(Register dst);
 810   void incl(Address dst);
 811   void incq(Register dst);
 812   void incq(Address dst);
 813 
 814   // New cpus require use of movsd and movss to avoid partial register stall
 815   // when loading from memory. But for old Opteron use movlpd instead of movsd.
 816   // The selection is done in MacroAssembler::movdbl() and movflt().
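  // Illustrative (assuming the current MacroAssembler behaviour): movdbl(xmm0, src)
  // picks movsd vs. movlpd based on the cpu, and movflt(xmm0, src) wraps movss,
  // so generated code normally never calls these raw forms directly.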
 817 
 818   // Move Scalar Single-Precision Floating-Point Values
 819   void movss(XMMRegister dst, Address src);
 820   void movss(XMMRegister dst, XMMRegister src);
 821   void movss(Address dst, XMMRegister src);
 822 
 823   // Move Scalar Double-Precision Floating-Point Values
 824   void movsd(XMMRegister dst, Address src);
 825   void movsd(XMMRegister dst, XMMRegister src);
 826   void movsd(Address dst, XMMRegister src);
 827   void movlpd(XMMRegister dst, Address src);
 828 
 829   // New cpus require use of movaps and movapd to avoid partial register stall
 830   // when moving between registers.
 831   void movaps(XMMRegister dst, XMMRegister src);
 832   void movapd(XMMRegister dst, XMMRegister src);
 833 
 834   // End avoid using directly
 835 
 836 
 837   // Instruction prefixes
 838   void prefix(Prefix p);
 839 
 840   public:
 841 
 842   // Creation
 843   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
 844     init_attributes();
 845   }
 846 
 847   // Decoding
 848   static address locate_operand(address inst, WhichOperand which);
 849   static address locate_next_instruction(address inst);
 850 
 851   // Utilities
 852   static bool is_polling_page_far() NOT_LP64({ return false;});
 853   static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
 854                                          int cur_tuple_type, int in_size_in_bits, int cur_encoding);
 855 
 856   // Generic instructions
 857   // Does 32bit or 64bit as needed for the platform. In some sense these
 858   // belong in macro assembler but there is no need for both varieties to exist
 859 
 860   void init_attributes(void) {
 861     _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
 862     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
 863     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
 864     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
 865     _is_managed = false;
 866     _vector_masking = false;
 867     _attributes = NULL;
 868   }
 869 
 870   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
 871   void clear_attributes(void) { _attributes = NULL; }
 872 
 873   void set_managed(void) { _is_managed = true; }
 874   void clear_managed(void) { _is_managed = false; }
 875   bool is_managed(void) { return _is_managed; }
 876 
 877   // Following functions are for stub code use only
 878   void set_vector_masking(void) { _vector_masking = true; }
 879   void clear_vector_masking(void) { _vector_masking = false; }
 880   bool is_vector_masking(void) { return _vector_masking; }
 881 
 882   void lea(Register dst, Address src);
 883 
 884   void mov(Register dst, Register src);
 885 
 886   void pusha();
 887   void popa();
 888 
 889   void pushf();
 890   void popf();
 891 
 892   void push(int32_t imm32);
 893 
 894   void push(Register src);
 895 
 896   void pop(Register dst);
 897 
 898   // These are dummies to prevent surprise implicit conversions to Register
 899   void push(void* v);
 900   void pop(void* v);
 901 
 902   // These do register sized moves/scans
 903   void rep_mov();
 904   void rep_stos();
 905   void rep_stosb();
 906   void repne_scan();
 907 #ifdef _LP64
 908   void repne_scanl();
 909 #endif
 910 
 911   // Vanilla instructions in lexical order
 912 
 913   void adcl(Address dst, int32_t imm32);
 914   void adcl(Address dst, Register src);
 915   void adcl(Register dst, int32_t imm32);
 916   void adcl(Register dst, Address src);
 917   void adcl(Register dst, Register src);
 918 
 919   void adcq(Register dst, int32_t imm32);
 920   void adcq(Register dst, Address src);
 921   void adcq(Register dst, Register src);
 922 
 923   void addb(Register dst, Register src);
 924   void addb(Address dst, int imm8);
 925   void addw(Register dst, Register src);
 926   void addw(Address dst, int imm16);
 927 
 928   void addl(Address dst, int32_t imm32);
 929   void addl(Address dst, Register src);
 930   void addl(Register dst, int32_t imm32);
 931   void addl(Register dst, Address src);
 932   void addl(Register dst, Register src);
 933 
 934   void addq(Address dst, int32_t imm32);
 935   void addq(Address dst, Register src);
 936   void addq(Register dst, int32_t imm32);
 937   void addq(Register dst, Address src);
 938   void addq(Register dst, Register src);
 939 
 940 #ifdef _LP64
  // Add Unsigned Integers with Carry Flag
 942   void adcxq(Register dst, Register src);
 943 
  // Add Unsigned Integers with Overflow Flag
 945   void adoxq(Register dst, Register src);
 946 #endif
 947 
 948   void addr_nop_4();
 949   void addr_nop_5();
 950   void addr_nop_7();
 951   void addr_nop_8();
 952 
 953   // Add Scalar Double-Precision Floating-Point Values
 954   void addsd(XMMRegister dst, Address src);
 955   void addsd(XMMRegister dst, XMMRegister src);
 956 
 957   // Add Scalar Single-Precision Floating-Point Values
 958   void addss(XMMRegister dst, Address src);
 959   void addss(XMMRegister dst, XMMRegister src);
 960 
 961   // AES instructions
 962   void aesdec(XMMRegister dst, Address src);
 963   void aesdec(XMMRegister dst, XMMRegister src);
 964   void aesdeclast(XMMRegister dst, Address src);
 965   void aesdeclast(XMMRegister dst, XMMRegister src);
 966   void aesenc(XMMRegister dst, Address src);
 967   void aesenc(XMMRegister dst, XMMRegister src);
 968   void aesenclast(XMMRegister dst, Address src);
 969   void aesenclast(XMMRegister dst, XMMRegister src);
 970 
 971   void andb(Register dst, Register src);
 972   void andw(Register dst, Register src);
 973 
 974   void andl(Address  dst, int32_t imm32);
 975   void andl(Register dst, int32_t imm32);
 976   void andl(Register dst, Address src);
 977   void andl(Register dst, Register src);
 978 
 979   void andq(Address  dst, int32_t imm32);
 980   void andq(Register dst, int32_t imm32);
 981   void andq(Register dst, Address src);
 982   void andq(Register dst, Register src);
 983 
 984   // BMI instructions
 985   void andnl(Register dst, Register src1, Register src2);
 986   void andnl(Register dst, Register src1, Address src2);
 987   void andnq(Register dst, Register src1, Register src2);
 988   void andnq(Register dst, Register src1, Address src2);
 989 
 990   void blsil(Register dst, Register src);
 991   void blsil(Register dst, Address src);
 992   void blsiq(Register dst, Register src);
 993   void blsiq(Register dst, Address src);
 994 
 995   void blsmskl(Register dst, Register src);
 996   void blsmskl(Register dst, Address src);
 997   void blsmskq(Register dst, Register src);
 998   void blsmskq(Register dst, Address src);
 999 
1000   void blsrl(Register dst, Register src);
1001   void blsrl(Register dst, Address src);
1002   void blsrq(Register dst, Register src);
1003   void blsrq(Register dst, Address src);
1004 
1005   void bsfl(Register dst, Register src);
1006   void bsrl(Register dst, Register src);
1007 
1008 #ifdef _LP64
1009   void bsfq(Register dst, Register src);
1010   void bsrq(Register dst, Register src);
1011 #endif
1012 
1013   void bswapl(Register reg);
1014 
1015   void bswapq(Register reg);
1016 
1017   void call(Label& L, relocInfo::relocType rtype);
1018   void call(Register reg);  // push pc; pc <- reg
1019   void call(Address adr);   // push pc; pc <- adr
1020 
1021   void cdql();
1022 
1023   void cdqq();
1024 
1025   void cld();
1026 
1027   void clflush(Address adr);
1028 
1029   void cmovl(Condition cc, Register dst, Register src);
1030   void cmovl(Condition cc, Register dst, Address src);
1031 
1032   void cmovq(Condition cc, Register dst, Register src);
1033   void cmovq(Condition cc, Register dst, Address src);
1034 
1035 
1036   void cmpb(Address dst, int imm8);
1037 
1038   void cmpl(Address dst, int32_t imm32);
1039 
1040   void cmpl(Register dst, int32_t imm32);
1041   void cmpl(Register dst, Register src);
1042   void cmpl(Register dst, Address src);
1043 
1044   void cmpq(Address dst, int32_t imm32);
1045   void cmpq(Address dst, Register src);
1046 
1047   void cmpq(Register dst, int32_t imm32);
1048   void cmpq(Register dst, Register src);
1049   void cmpq(Register dst, Address src);
1050 
  // these are dummies used to catch attempts to convert NULL to Register
1052   void cmpl(Register dst, void* junk); // dummy
1053   void cmpq(Register dst, void* junk); // dummy
1054 
1055   void cmpw(Address dst, int imm16);
1056 
1057   void cmpxchg8 (Address adr);
1058 
1059   void cmpxchgb(Register reg, Address adr);
1060   void cmpxchgl(Register reg, Address adr);
1061 
1062   void cmpxchgq(Register reg, Address adr);
1063 
1064   // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1065   void comisd(XMMRegister dst, Address src);
1066   void comisd(XMMRegister dst, XMMRegister src);
1067 
1068   // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1069   void comiss(XMMRegister dst, Address src);
1070   void comiss(XMMRegister dst, XMMRegister src);
1071 
1072   // Identify processor type and features
1073   void cpuid();
1074 
1075   // CRC32C
1076   void crc32(Register crc, Register v, int8_t sizeInBytes);
1077   void crc32(Register crc, Address adr, int8_t sizeInBytes);
1078 
1079   // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
1080   void cvtsd2ss(XMMRegister dst, XMMRegister src);
1081   void cvtsd2ss(XMMRegister dst, Address src);
1082 
1083   // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
1084   void cvtsi2sdl(XMMRegister dst, Register src);
1085   void cvtsi2sdl(XMMRegister dst, Address src);
1086   void cvtsi2sdq(XMMRegister dst, Register src);
1087   void cvtsi2sdq(XMMRegister dst, Address src);
1088 
1089   // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
1090   void cvtsi2ssl(XMMRegister dst, Register src);
1091   void cvtsi2ssl(XMMRegister dst, Address src);
1092   void cvtsi2ssq(XMMRegister dst, Register src);
1093   void cvtsi2ssq(XMMRegister dst, Address src);
1094 
1095   // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
1096   void cvtdq2pd(XMMRegister dst, XMMRegister src);
1097 
1098   // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
1099   void cvtdq2ps(XMMRegister dst, XMMRegister src);
1100 
1101   // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
1102   void cvtss2sd(XMMRegister dst, XMMRegister src);
1103   void cvtss2sd(XMMRegister dst, Address src);
1104 
1105   // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
1106   void cvttsd2sil(Register dst, Address src);
1107   void cvttsd2sil(Register dst, XMMRegister src);
1108   void cvttsd2siq(Register dst, XMMRegister src);
1109 
1110   // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
1111   void cvttss2sil(Register dst, XMMRegister src);
1112   void cvttss2siq(Register dst, XMMRegister src);
1113 
1114   void cvttpd2dq(XMMRegister dst, XMMRegister src);
1115 
1116   // Convert vector float and double
1117   void vcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len);
1118   void evcvtps2pd(XMMRegister dst, XMMRegister src, int vector_len);
1119 
  // Abs of packed integer values
1121   void pabsb(XMMRegister dst, XMMRegister src);
1122   void pabsw(XMMRegister dst, XMMRegister src);
1123   void pabsd(XMMRegister dst, XMMRegister src);
1124   void vpabsb(XMMRegister dst, XMMRegister src, int vector_len);
1125   void vpabsw(XMMRegister dst, XMMRegister src, int vector_len);
1126   void vpabsd(XMMRegister dst, XMMRegister src, int vector_len);
1127   void evpabsb(XMMRegister dst, XMMRegister src, int vector_len);
1128   void evpabsw(XMMRegister dst, XMMRegister src, int vector_len);
1129   void evpabsd(XMMRegister dst, XMMRegister src, int vector_len);
1130   void evpabsq(XMMRegister dst, XMMRegister src, int vector_len);
1131 
1132   // Divide Scalar Double-Precision Floating-Point Values
1133   void divsd(XMMRegister dst, Address src);
1134   void divsd(XMMRegister dst, XMMRegister src);
1135 
1136   // Divide Scalar Single-Precision Floating-Point Values
1137   void divss(XMMRegister dst, Address src);
1138   void divss(XMMRegister dst, XMMRegister src);
1139 
1140   void emms();
1141 
1142   void fabs();
1143 
1144   void fadd(int i);
1145 
1146   void fadd_d(Address src);
1147   void fadd_s(Address src);
1148 
1149   // "Alternate" versions of x87 instructions place result down in FPU
1150   // stack instead of on TOS
1151 
1152   void fadda(int i); // "alternate" fadd
1153   void faddp(int i = 1);
1154 
1155   void fchs();
1156 
1157   void fcom(int i);
1158 
1159   void fcomp(int i = 1);
1160   void fcomp_d(Address src);
1161   void fcomp_s(Address src);
1162 
1163   void fcompp();
1164 
1165   void fcos();
1166 
1167   void fdecstp();
1168 
1169   void fdiv(int i);
1170   void fdiv_d(Address src);
1171   void fdivr_s(Address src);
1172   void fdiva(int i);  // "alternate" fdiv
1173   void fdivp(int i = 1);
1174 
1175   void fdivr(int i);
1176   void fdivr_d(Address src);
1177   void fdiv_s(Address src);
1178 
1179   void fdivra(int i); // "alternate" reversed fdiv
1180 
1181   void fdivrp(int i = 1);
1182 
1183   void ffree(int i = 0);
1184 
1185   void fild_d(Address adr);
1186   void fild_s(Address adr);
1187 
1188   void fincstp();
1189 
1190   void finit();
1191 
1192   void fist_s (Address adr);
1193   void fistp_d(Address adr);
1194   void fistp_s(Address adr);
1195 
1196   void fld1();
1197 
1198   void fld_d(Address adr);
1199   void fld_s(Address adr);
1200   void fld_s(int index);
1201   void fld_x(Address adr);  // extended-precision (80-bit) format
1202 
1203   void fldcw(Address src);
1204 
1205   void fldenv(Address src);
1206 
1207   void fldlg2();
1208 
1209   void fldln2();
1210 
1211   void fldz();
1212 
1213   void flog();
1214   void flog10();
1215 
1216   void fmul(int i);
1217 
1218   void fmul_d(Address src);
1219   void fmul_s(Address src);
1220 
1221   void fmula(int i);  // "alternate" fmul
1222 
1223   void fmulp(int i = 1);
1224 
1225   void fnsave(Address dst);
1226 
1227   void fnstcw(Address src);
1228 
1229   void fnstsw_ax();
1230 
1231   void fprem();
1232   void fprem1();
1233 
1234   void frstor(Address src);
1235 
1236   void fsin();
1237 
1238   void fsqrt();
1239 
1240   void fst_d(Address adr);
1241   void fst_s(Address adr);
1242 
1243   void fstp_d(Address adr);
1244   void fstp_d(int index);
1245   void fstp_s(Address adr);
1246   void fstp_x(Address adr); // extended-precision (80-bit) format
1247 
1248   void fsub(int i);
1249   void fsub_d(Address src);
1250   void fsub_s(Address src);
1251 
1252   void fsuba(int i);  // "alternate" fsub
1253 
1254   void fsubp(int i = 1);
1255 
1256   void fsubr(int i);
1257   void fsubr_d(Address src);
1258   void fsubr_s(Address src);
1259 
1260   void fsubra(int i); // "alternate" reversed fsub
1261 
1262   void fsubrp(int i = 1);
1263 
1264   void ftan();
1265 
1266   void ftst();
1267 
1268   void fucomi(int i = 1);
1269   void fucomip(int i = 1);
1270 
1271   void fwait();
1272 
1273   void fxch(int i = 1);
1274 
1275   void fxrstor(Address src);
1276   void xrstor(Address src);
1277 
1278   void fxsave(Address dst);
1279   void xsave(Address dst);
1280 
1281   void fyl2x();
1282   void frndint();
1283   void f2xm1();
1284   void fldl2e();
1285 
1286   void hlt();
1287 
1288   void idivl(Register src);
1289   void divl(Register src); // Unsigned division
1290 
1291 #ifdef _LP64
1292   void idivq(Register src);
1293 #endif
1294 
1295   void imull(Register src);
1296   void imull(Register dst, Register src);
1297   void imull(Register dst, Register src, int value);
1298   void imull(Register dst, Address src);
1299 
1300 #ifdef _LP64
1301   void imulq(Register dst, Register src);
1302   void imulq(Register dst, Register src, int value);
1303   void imulq(Register dst, Address src);
1304 #endif
1305 
1306   // jcc is the generic conditional branch generator to run-
1307   // time routines, jcc is used for branches to labels. jcc
1308   // takes a branch opcode (cc) and a label (L) and generates
1309   // either a backward branch or a forward branch and links it
1310   // to the label fixup chain. Usage:
1311   //
1312   // Label L;      // unbound label
1313   // jcc(cc, L);   // forward branch to unbound label
1314   // bind(L);      // bind label to the current pc
1315   // jcc(cc, L);   // backward branch to bound label
1316   // bind(L);      // illegal: a label may be bound only once
1317   //
1318   // Note: The same Label can be used for forward and backward branches
1319   // but it may be bound only once.
1320 
1321   void jcc(Condition cc, Label& L, bool maybe_short = true);
1322 
1323   // Conditional jump to a 8-bit offset to L.
1324   // WARNING: be very careful using this for forward jumps.  If the label is
1325   // not bound within an 8-bit offset of this instruction, a run-time error
1326   // will occur.
1327   void jccb(Condition cc, Label& L);
1328 
1329   void jmp(Address entry);    // pc <- entry
1330 
1331   // Label operations & relative jumps (PPUM Appendix D)
1332   void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L
1333 
1334   void jmp(Register entry); // pc <- entry
1335 
1336   // Unconditional 8-bit offset jump to L.
1337   // WARNING: be very careful using this for forward jumps.  If the label is
1338   // not bound within an 8-bit offset of this instruction, a run-time error
1339   // will occur.
1340   void jmpb(Label& L);
1341 
1342   void ldmxcsr( Address src );
1343 
1344   void leal(Register dst, Address src);
1345 
1346   void leaq(Register dst, Address src);
1347 
1348   void lfence();
1349 
1350   void lock();
1351 
1352   void lzcntl(Register dst, Register src);
1353 
1354 #ifdef _LP64
1355   void lzcntq(Register dst, Register src);
1356 #endif
1357 
1358   enum Membar_mask_bits {
1359     StoreStore = 1 << 3,
1360     LoadStore  = 1 << 2,
1361     StoreLoad  = 1 << 1,
1362     LoadLoad   = 1 << 0
1363   };
1364 
1365   // Serializes memory and blows flags
1366   void membar(Membar_mask_bits order_constraint) {
1367     if (os::is_MP()) {
1368       // We only have to handle StoreLoad
1369       if (order_constraint & StoreLoad) {
1370         // All usable chips support "locked" instructions which suffice
1371         // as barriers, and are much faster than the alternative of
1372         // using cpuid instruction. We use here a locked add [esp-C],0.
1373         // This is conveniently otherwise a no-op except for blowing
1374         // flags, and introducing a false dependency on target memory
1375         // location. We can't do anything with flags, but we can avoid
1376         // memory dependencies in the current method by locked-adding
1377         // somewhere else on the stack. Doing [esp+C] will collide with
1378         // something on stack in current method, hence we go for [esp-C].
1379         // It is convenient since it is almost always in data cache, for
1380         // any small C.  We need to step back from SP to avoid data
1381         // dependencies with other things on below SP (callee-saves, for
1382         // example). Without a clear way to figure out the minimal safe
1383         // distance from SP, it makes sense to step back the complete
1384         // cache line, as this will also avoid possible second-order effects
1385         // with locked ops against the cache line. Our choice of offset
1386         // is bounded by x86 operand encoding, which should stay within
        // [-128; +127] to have the 8-bit (single byte) displacement encoding.
1388         //
1389         // Any change to this code may need to revisit other places in
1390         // the code where this idiom is used, in particular the
1391         // orderAccess code.
1392 
1393         int offset = -VM_Version::L1_line_size();
1394         if (offset < -128) {
1395           offset = -128;
1396         }
1397 
1398         lock();
        addl(Address(rsp, offset), 0); // Assert the lock# signal here
1400       }
1401     }
1402   }
1403 
1404   void mfence();
1405 
1406   // Moves
1407 
1408   void mov64(Register dst, int64_t imm64);
1409 
1410   void movb(Address dst, Register src);
1411   void movb(Address dst, int imm8);
1412   void movb(Register dst, Address src);
1413 
1414   void movddup(XMMRegister dst, XMMRegister src);
1415 
1416   void kmovbl(KRegister dst, Register src);
1417   void kmovbl(Register dst, KRegister src);
1418   void kmovwl(KRegister dst, Register src);
1419   void kmovwl(KRegister dst, Address src);
1420   void kmovwl(Register dst, KRegister src);
1421   void kmovdl(KRegister dst, Register src);
1422   void kmovdl(Register dst, KRegister src);
1423   void kmovql(KRegister dst, KRegister src);
1424   void kmovql(Address dst, KRegister src);
1425   void kmovql(KRegister dst, Address src);
1426   void kmovql(KRegister dst, Register src);
1427   void kmovql(Register dst, KRegister src);
1428 
1429   void knotwl(KRegister dst, KRegister src);
1430 
1431   void kortestbl(KRegister dst, KRegister src);
1432   void kortestwl(KRegister dst, KRegister src);
1433   void kortestdl(KRegister dst, KRegister src);
1434   void kortestql(KRegister dst, KRegister src);
1435 
1436   void ktestq(KRegister src1, KRegister src2);
1437   void ktestd(KRegister src1, KRegister src2);
1438 
1439   void ktestql(KRegister dst, KRegister src);
1440 
1441   void movdl(XMMRegister dst, Register src);
1442   void movdl(Register dst, XMMRegister src);
1443   void movdl(XMMRegister dst, Address src);
1444   void movdl(Address dst, XMMRegister src);
1445 
1446   // Move Double Quadword
1447   void movdq(XMMRegister dst, Register src);
1448   void movdq(Register dst, XMMRegister src);
1449 
1450   // Move Aligned Double Quadword
1451   void movdqa(XMMRegister dst, XMMRegister src);
1452   void movdqa(XMMRegister dst, Address src);
1453 
1454   // Move Unaligned Double Quadword
1455   void movdqu(Address     dst, XMMRegister src);
1456   void movdqu(XMMRegister dst, Address src);
1457   void movdqu(XMMRegister dst, XMMRegister src);
1458 
1459   // Move Unaligned 256bit Vector
1460   void vmovdqu(Address dst, XMMRegister src);
1461   void vmovdqu(XMMRegister dst, Address src);
1462   void vmovdqu(XMMRegister dst, XMMRegister src);
1463 
  // Move Unaligned 512bit Vector
1465   void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len);
1466   void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len);
1467   void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1468   void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
1469   void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len);
1470   void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
1471   void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len);
1472   void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
1473   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1474   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1475   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1476   void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
1477   void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
1478   void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
1479   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1480   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1481   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1482   void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
1483   void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len);
1484   void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
1485 
1486   // Move lower 64bit to high 64bit in 128bit register
1487   void movlhps(XMMRegister dst, XMMRegister src);
1488 
1489   void movl(Register dst, int32_t imm32);
1490   void movl(Address dst, int32_t imm32);
1491   void movl(Register dst, Register src);
1492   void movl(Register dst, Address src);
1493   void movl(Address dst, Register src);
1494 
  // These dummies prevent movl from converting a zero (like NULL) into a Register
  // by giving the compiler two choices it can't resolve
1497 
1498   void movl(Address  dst, void* junk);
1499   void movl(Register dst, void* junk);
1500 
1501 #ifdef _LP64
1502   void movq(Register dst, Register src);
1503   void movq(Register dst, Address src);
1504   void movq(Address  dst, Register src);
1505 #endif
1506 
1507   void movq(Address     dst, MMXRegister src );
1508   void movq(MMXRegister dst, Address src );
1509 
1510 #ifdef _LP64
  // These dummies prevent movq from converting a zero (like NULL) into a Register
  // by giving the compiler two choices it can't resolve
1513 
1514   void movq(Address  dst, void* dummy);
1515   void movq(Register dst, void* dummy);
1516 #endif
1517 
1518   // Move Quadword
1519   void movq(Address     dst, XMMRegister src);
1520   void movq(XMMRegister dst, Address src);
1521   void movq(Register dst, XMMRegister src);
1522   void movq(XMMRegister dst, Register src);
1523 
1524   void movsbl(Register dst, Address src);
1525   void movsbl(Register dst, Register src);
1526 
1527 #ifdef _LP64
1528   void movsbq(Register dst, Address src);
1529   void movsbq(Register dst, Register src);
1530 
1531   // Move signed 32bit immediate to 64bit extending sign
1532   void movslq(Address  dst, int32_t imm64);
1533   void movslq(Register dst, int32_t imm64);
1534 
1535   void movslq(Register dst, Address src);
1536   void movslq(Register dst, Register src);
1537   void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
1538 #endif
1539 
1540   void movswl(Register dst, Address src);
1541   void movswl(Register dst, Register src);
1542 
1543 #ifdef _LP64
1544   void movswq(Register dst, Address src);
1545   void movswq(Register dst, Register src);
1546 #endif
1547 
1548   void movw(Address dst, int imm16);
1549   void movw(Register dst, Address src);
1550   void movw(Address dst, Register src);
1551 
1552   void movzbl(Register dst, Address src);
1553   void movzbl(Register dst, Register src);
1554 
1555 #ifdef _LP64
1556   void movzbq(Register dst, Address src);
1557   void movzbq(Register dst, Register src);
1558 #endif
1559 
1560   void movzwl(Register dst, Address src);
1561   void movzwl(Register dst, Register src);
1562 
1563 #ifdef _LP64
1564   void movzwq(Register dst, Address src);
1565   void movzwq(Register dst, Register src);
1566 #endif
1567 
1568   // Unsigned multiply with RAX destination register
1569   void mull(Address src);
1570   void mull(Register src);
1571 
1572 #ifdef _LP64
1573   void mulq(Address src);
1574   void mulq(Register src);
1575   void mulxq(Register dst1, Register dst2, Register src);
1576 #endif
1577 
1578   // Multiply Scalar Double-Precision Floating-Point Values
1579   void mulsd(XMMRegister dst, Address src);
1580   void mulsd(XMMRegister dst, XMMRegister src);
1581 
1582   // Multiply Scalar Single-Precision Floating-Point Values
1583   void mulss(XMMRegister dst, Address src);
1584   void mulss(XMMRegister dst, XMMRegister src);
1585 
1586   void negl(Register dst);
1587 
1588 #ifdef _LP64
1589   void negq(Register dst);
1590 #endif
1591 
1592   void nop(int i = 1);
1593 
1594   void notl(Register dst);
1595 
1596 #ifdef _LP64
1597   void notq(Register dst);
1598 #endif
1599 
1600   void orl(Address dst, int32_t imm32);
1601   void orl(Register dst, int32_t imm32);
1602   void orl(Register dst, Address src);
1603   void orl(Register dst, Register src);
1604   void orl(Address dst, Register src);
1605 
1606   void orq(Address dst, int32_t imm32);
1607   void orq(Register dst, int32_t imm32);
1608   void orq(Register dst, Address src);
1609   void orq(Register dst, Register src);
1610 
1611   // Pack with unsigned saturation
1612   void packuswb(XMMRegister dst, XMMRegister src);
1613   void packuswb(XMMRegister dst, Address src);
1614   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1615   void vpackusdw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1616 
1617   // Permutations
1618   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1619   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1620   void vpermd(XMMRegister dst,  XMMRegister nds, XMMRegister src);
1621   void vpermd(XMMRegister dst,  XMMRegister nds, Address src);
1622   void vperm2i128(XMMRegister dst,  XMMRegister nds, XMMRegister src, int imm8);
1623   void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
1624 
1625   void pause();
1626 
1627   // Undefined Instruction
1628   void ud2();
1629 
1630   // SSE4.2 string instructions
1631   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1632   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1633 
1634   void pcmpeqb(XMMRegister dst, XMMRegister src);
1635   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1636   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1637   void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1638   void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
1639 
1640   void vpcmpgtb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1641   void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1642   void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
1643 
1644   void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
1645   void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
1646   void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);
1647 
1648   void pcmpeqw(XMMRegister dst, XMMRegister src);
1649   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1650   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1651   void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1652 
1653   void vpcmpgtw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1654 
1655   void pcmpeqd(XMMRegister dst, XMMRegister src);
1656   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1657   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int vector_len);
1658   void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
1659 
1660   void pcmpeqq(XMMRegister dst, XMMRegister src);
1661   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1662   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1663   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1664 
1665   void pcmpgtq(XMMRegister dst, XMMRegister src);
1666   void vpcmpgtq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1667 
1668   void pmovmskb(Register dst, XMMRegister src);
1669   void vpmovmskb(Register dst, XMMRegister src);
1670 
1671   // SSE 4.1 extract
1672   void pextrd(Register dst, XMMRegister src, int imm8);
1673   void pextrq(Register dst, XMMRegister src, int imm8);
1674   void pextrd(Address dst, XMMRegister src, int imm8);
1675   void pextrq(Address dst, XMMRegister src, int imm8);
1676   void pextrb(Register dst, XMMRegister src, int imm8);
1677   void pextrb(Address dst, XMMRegister src, int imm8);
1678   // SSE 2 extract
1679   void pextrw(Register dst, XMMRegister src, int imm8);
1680   void pextrw(Address dst, XMMRegister src, int imm8);
1681 
1682   // SSE 4.1 insert
1683   void pinsrd(XMMRegister dst, Register src, int imm8);
1684   void pinsrq(XMMRegister dst, Register src, int imm8);
1685   void pinsrd(XMMRegister dst, Address src, int imm8);
1686   void pinsrq(XMMRegister dst, Address src, int imm8);
1687   void pinsrb(XMMRegister dst, Address src, int imm8);
1688   // SSE 2 insert
1689   void pinsrw(XMMRegister dst, Register src, int imm8);
1690   void pinsrw(XMMRegister dst, Address src, int imm8);
1691 
1692   // Zero extend moves
1693   void pmovzxbw(XMMRegister dst, XMMRegister src);
1694   void pmovzxbw(XMMRegister dst, Address src);
1695   void vpmovzxbw( XMMRegister dst, Address src, int vector_len);
1696   void pmovzxdq(XMMRegister dst, XMMRegister src);
1697   void vpmovzxdq(XMMRegister dst, XMMRegister src, int vector_len);
1698   void vpmovzxbd(XMMRegister dst, XMMRegister src, int vector_len);
1699   void vpmovzxbq(XMMRegister dst, XMMRegister src, int vector_len);
1700   void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);
1701 
1702   // Sign extend moves
1703   void pmovsxbw(XMMRegister dst, XMMRegister src);
1704   void pmovsxbd(XMMRegister dst, XMMRegister src);
1705   void pmovsxbq(XMMRegister dst, XMMRegister src);
1706   void vpmovsxbd(XMMRegister dst, XMMRegister src, int vector_len);
1707   void vpmovsxbq(XMMRegister dst, XMMRegister src, int vector_len);
1708   void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len);
1709 
1710   void evpmovwb(Address dst, XMMRegister src, int vector_len);
1711   void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);
1712 
1713 #ifndef _LP64 // no 32bit push/pop on amd64
1714   void popl(Address dst);
1715 #endif
1716 
1717 #ifdef _LP64
1718   void popq(Address dst);
1719 #endif
1720 
1721   void popcntl(Register dst, Address src);
1722   void popcntl(Register dst, Register src);
1723 
1724   void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len);
1725 
1726 #ifdef _LP64
1727   void popcntq(Register dst, Address src);
1728   void popcntq(Register dst, Register src);
1729 #endif
1730 
1731   // Prefetches (SSE, SSE2, 3DNOW only)
1732 
1733   void prefetchnta(Address src);
1734   void prefetchr(Address src);
1735   void prefetcht0(Address src);
1736   void prefetcht1(Address src);
1737   void prefetcht2(Address src);
1738   void prefetchw(Address src);
1739 
1740   // Shuffle Bytes
1741   void pshufb(XMMRegister dst, XMMRegister src);
1742   void pshufb(XMMRegister dst, Address src);
1743   void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1744 
1745   // Shuffle Packed Doublewords
1746   void pshufd(XMMRegister dst, XMMRegister src, int mode);
1747   void pshufd(XMMRegister dst, Address src,     int mode);
1748   void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);
1749 
1750   // Shuffle Packed Low Words
1751   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1752   void pshuflw(XMMRegister dst, Address src,     int mode);
1753 
1754   // Shift DoubleQuadword Right Logical by bytes (immediate)
1755   void psrldq(XMMRegister dst, int shift);
1756   // Shift DoubleQuadword Left Logical by bytes (immediate)
1757   void pslldq(XMMRegister dst, int shift);
1758 
1759   // Logical Compare 128bit
1760   void ptest(XMMRegister dst, XMMRegister src);
1761   void ptest(XMMRegister dst, Address src);
1762   // Logical Compare 256bit
1763   void vptest(XMMRegister dst, XMMRegister src);
1764   void vptest(XMMRegister dst, Address src);
1765 
1766   // Vector compare
1767   void vptest(XMMRegister dst, XMMRegister src, int vector_len);
1768 
1769   // Interleave Low Bytes
1770   void punpcklbw(XMMRegister dst, XMMRegister src);
1771   void punpcklbw(XMMRegister dst, Address src);
1772 
1773   // Interleave Low Doublewords
1774   void punpckldq(XMMRegister dst, XMMRegister src);
1775   void punpckldq(XMMRegister dst, Address src);
1776 
1777   // Interleave Low Quadwords
1778   void punpcklqdq(XMMRegister dst, XMMRegister src);
1779 
1780 #ifndef _LP64 // no 32bit push/pop on amd64
1781   void pushl(Address src);
1782 #endif
1783 
1784   void pushq(Address src);
1785 
1786   void rcll(Register dst, int imm8);
1787 
1788   void rclq(Register dst, int imm8);
1789 
1790   void rcrq(Register dst, int imm8);
1791 
1792   void rcpps(XMMRegister dst, XMMRegister src);
1793 
1794   void rcpss(XMMRegister dst, XMMRegister src);
1795 
1796   void rdtsc();
1797 
1798   void ret(int imm16);
1799 
1800 #ifdef _LP64
1801   void rorq(Register dst, int imm8);
1802   void rorxq(Register dst, Register src, int imm8);
1803   void rorxd(Register dst, Register src, int imm8);
1804 #endif
1805 
1806   void sahf();
1807 
1808   void sarl(Register dst, int imm8);
1809   void sarl(Register dst);
1810 
1811   void sarq(Register dst, int imm8);
1812   void sarq(Register dst);
1813 
1814   void sbbl(Address dst, int32_t imm32);
1815   void sbbl(Register dst, int32_t imm32);
1816   void sbbl(Register dst, Address src);
1817   void sbbl(Register dst, Register src);
1818 
1819   void sbbq(Address dst, int32_t imm32);
1820   void sbbq(Register dst, int32_t imm32);
1821   void sbbq(Register dst, Address src);
1822   void sbbq(Register dst, Register src);
1823 
1824   void setb(Condition cc, Register dst);
1825 
1826   void palignr(XMMRegister dst, XMMRegister src, int imm8);
1827   void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
1828 
1829   void pblendw(XMMRegister dst, XMMRegister src, int imm8);
1830 
1831   void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
1832   void sha1nexte(XMMRegister dst, XMMRegister src);
1833   void sha1msg1(XMMRegister dst, XMMRegister src);
1834   void sha1msg2(XMMRegister dst, XMMRegister src);
1835   // xmm0 is an implicit additional source for the following instruction.
1836   void sha256rnds2(XMMRegister dst, XMMRegister src);
1837   void sha256msg1(XMMRegister dst, XMMRegister src);
1838   void sha256msg2(XMMRegister dst, XMMRegister src);
1839 
1840   void shldl(Register dst, Register src);
1841   void shldl(Register dst, Register src, int8_t imm8);
1842 
1843   void shll(Register dst, int imm8);
1844   void shll(Register dst);
1845 
1846   void shlq(Register dst, int imm8);
1847   void shlq(Register dst);
1848 
1849   void shrdl(Register dst, Register src);
1850 
1851   void shrl(Register dst, int imm8);
1852   void shrl(Register dst);
1853 
1854   void shrq(Register dst, int imm8);
1855   void shrq(Register dst);
1856 
1857   void smovl(); // QQQ generic?
1858 
1859   // Compute Square Root of Scalar Double-Precision Floating-Point Value
1860   void sqrtsd(XMMRegister dst, Address src);
1861   void sqrtsd(XMMRegister dst, XMMRegister src);
1862 
1863   // Compute Square Root of Scalar Single-Precision Floating-Point Value
1864   void sqrtss(XMMRegister dst, Address src);
1865   void sqrtss(XMMRegister dst, XMMRegister src);
1866 
1867   void std();
1868 
1869   void stmxcsr( Address dst );
1870 
1871   void subl(Address dst, int32_t imm32);
1872   void subl(Address dst, Register src);
1873   void subl(Register dst, int32_t imm32);
1874   void subl(Register dst, Address src);
1875   void subl(Register dst, Register src);
1876 
1877   void subq(Address dst, int32_t imm32);
1878   void subq(Address dst, Register src);
1879   void subq(Register dst, int32_t imm32);
1880   void subq(Register dst, Address src);
1881   void subq(Register dst, Register src);
1882 
1883   // Force generation of a 4-byte immediate value even if it fits into 8 bits
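       // (Forcing the 4-byte form keeps the emitted instruction length independent of the
       // immediate's value, which is useful, for example, when the immediate may be patched later.)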
1884   void subl_imm32(Register dst, int32_t imm32);
1885   void subq_imm32(Register dst, int32_t imm32);
1886 
1887   // Subtract Scalar Double-Precision Floating-Point Values
1888   void subsd(XMMRegister dst, Address src);
1889   void subsd(XMMRegister dst, XMMRegister src);
1890 
1891   // Subtract Scalar Single-Precision Floating-Point Values
1892   void subss(XMMRegister dst, Address src);
1893   void subss(XMMRegister dst, XMMRegister src);
1894 
1895   void testb(Register dst, int imm8);
1896   void testb(Address dst, int imm8);
1897 
1898   void testl(Register dst, int32_t imm32);
1899   void testl(Register dst, Register src);
1900   void testl(Register dst, Address src);
1901 
1902   void testq(Register dst, int32_t imm32);
1903   void testq(Register dst, Register src);
1904 
1905   // BMI - count trailing zeros
1906   void tzcntl(Register dst, Register src);
1907   void tzcntq(Register dst, Register src);
1908 
1909   // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1910   void ucomisd(XMMRegister dst, Address src);
1911   void ucomisd(XMMRegister dst, XMMRegister src);
1912 
1913   // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1914   void ucomiss(XMMRegister dst, Address src);
1915   void ucomiss(XMMRegister dst, XMMRegister src);
1916 
1917   void xabort(int8_t imm8);
1918 
1919   void xaddb(Address dst, Register src);
1920   void xaddw(Address dst, Register src);
1921   void xaddl(Address dst, Register src);
1922   void xaddq(Address dst, Register src);
1923 
1924   void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
1925 
1926   void xchgb(Register reg, Address adr);
1927   void xchgw(Register reg, Address adr);
1928   void xchgl(Register reg, Address adr);
1929   void xchgl(Register dst, Register src);
1930 
1931   void xchgq(Register reg, Address adr);
1932   void xchgq(Register dst, Register src);
1933 
1934   void xend();
1935 
1936   // Get Value of Extended Control Register
1937   void xgetbv();
1938 
1939   void xorl(Register dst, int32_t imm32);
1940   void xorl(Register dst, Address src);
1941   void xorl(Register dst, Register src);
1942 
1943   void xorb(Register dst, Address src);
1944 
1945   void xorq(Register dst, Address src);
1946   void xorq(Register dst, Register src);
1947 
1948   void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
1949 
1950   // AVX 3-operands scalar instructions (encoded with VEX prefix)
1951 
1952   void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
1953   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1954   void vaddss(XMMRegister dst, XMMRegister nds, Address src);
1955   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1956   void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
1957   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1958   void vdivss(XMMRegister dst, XMMRegister nds, Address src);
1959   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1960   void vfmadd231sd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1961   void vfmadd231ss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1962   void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
1963   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1964   void vmulss(XMMRegister dst, XMMRegister nds, Address src);
1965   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1966   void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
1967   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1968   void vsubss(XMMRegister dst, XMMRegister nds, Address src);
1969   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1970 
1971   void shlxl(Register dst, Register src1, Register src2);
1972   void shlxq(Register dst, Register src1, Register src2);
1973 
1974   //====================VECTOR ARITHMETIC=====================================
1975 
1976   // Add Packed Floating-Point Values
1977   void addpd(XMMRegister dst, XMMRegister src);
1978   void addpd(XMMRegister dst, Address src);
1979   void addps(XMMRegister dst, XMMRegister src);
1980   void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1981   void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1982   void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1983   void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1984 
1985   // Subtract Packed Floating-Point Values
1986   void subpd(XMMRegister dst, XMMRegister src);
1987   void subps(XMMRegister dst, XMMRegister src);
1988   void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1989   void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1990   void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1991   void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1992 
1993   // Multiply Packed Floating-Point Values
1994   void mulpd(XMMRegister dst, XMMRegister src);
1995   void mulpd(XMMRegister dst, Address src);
1996   void mulps(XMMRegister dst, XMMRegister src);
1997   void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1998   void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1999   void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2000   void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2001 
2002   void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2003   void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2004   void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2005   void vfmadd231ps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2006 
2007   // Divide Packed Floating-Point Values
2008   void divpd(XMMRegister dst, XMMRegister src);
2009   void divps(XMMRegister dst, XMMRegister src);
2010   void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2011   void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2012   void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2013   void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2014 
2015   // Sqrt Packed Floating-Point Values
2016   void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
2017   void vsqrtpd(XMMRegister dst, Address src, int vector_len);
2018   void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
2019   void vsqrtps(XMMRegister dst, Address src, int vector_len);
2020 
2021   // Bitwise Logical AND of Packed Floating-Point Values
2022   void andpd(XMMRegister dst, XMMRegister src);
2023   void andps(XMMRegister dst, XMMRegister src);
2024   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2025   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2026   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2027   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2028 
2029   void unpckhpd(XMMRegister dst, XMMRegister src);
2030   void unpcklpd(XMMRegister dst, XMMRegister src);
2031 
2032   // Bitwise Logical XOR of Packed Floating-Point Values
2033   void xorpd(XMMRegister dst, XMMRegister src);
2034   void xorps(XMMRegister dst, XMMRegister src);
2035   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2036   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2037   void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2038   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2039 
2040   // Add horizontal packed integers
2041   void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2042   void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2043   void phaddw(XMMRegister dst, XMMRegister src);
2044   void phaddd(XMMRegister dst, XMMRegister src);
2045 
2046   // Add packed integers
2047   void paddb(XMMRegister dst, XMMRegister src);
2048   void paddw(XMMRegister dst, XMMRegister src);
2049   void paddd(XMMRegister dst, XMMRegister src);
2050   void paddd(XMMRegister dst, Address src);
2051   void paddq(XMMRegister dst, XMMRegister src);
2052   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2053   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2054   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2055   void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2056   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2057   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2058   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2059   void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2060 
2061   // Sub packed integers
2062   void psubb(XMMRegister dst, XMMRegister src);
2063   void psubw(XMMRegister dst, XMMRegister src);
2064   void psubd(XMMRegister dst, XMMRegister src);
2065   void psubq(XMMRegister dst, XMMRegister src);
2066   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2067   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2068   void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2069   void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2070   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2071   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2072   void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2073   void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2074 
2075   // Multiply packed integers (shorts and ints; longs with AVX-512DQ)
2076   void pmullw(XMMRegister dst, XMMRegister src);
2077   void pmulld(XMMRegister dst, XMMRegister src);
2078   void pmuludq(XMMRegister dst, XMMRegister src);
2079   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2080   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2081   void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2082   void vpmuludq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2083   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2084   void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2085   void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2086 
2087   // Minimum of packed integers
2088   void pminsb(XMMRegister dst, XMMRegister src);
2089   void vpminsb(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2090   void pminsw(XMMRegister dst, XMMRegister src);
2091   void vpminsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2092   void pminsd(XMMRegister dst, XMMRegister src);
2093   void vpminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2094   void vpminsq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2095   void minps(XMMRegister dst, XMMRegister src);
2096   void vminps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2097   void minpd(XMMRegister dst, XMMRegister src);
2098   void vminpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2099 
2100   // Maximum of packed integers
2101   void pmaxsb(XMMRegister dst, XMMRegister src);
2102   void vpmaxsb(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2103   void pmaxsw(XMMRegister dst, XMMRegister src);
2104   void vpmaxsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2105   void pmaxsd(XMMRegister dst, XMMRegister src);
2106   void vpmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2107   void vpmaxsq(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2108   void maxps(XMMRegister dst, XMMRegister src);
2109   void vmaxps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2110   void maxpd(XMMRegister dst, XMMRegister src);
2111   void vmaxpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
2112 
2113   // Shift left packed integers
2114   void psllw(XMMRegister dst, int shift);
2115   void pslld(XMMRegister dst, int shift);
2116   void psllq(XMMRegister dst, int shift);
2117   void psllw(XMMRegister dst, XMMRegister shift);
2118   void pslld(XMMRegister dst, XMMRegister shift);
2119   void psllq(XMMRegister dst, XMMRegister shift);
2120   void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2121   void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2122   void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2123   void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2124   void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2125   void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2126 
2127   // Logical shift right packed integers
2128   void psrlw(XMMRegister dst, int shift);
2129   void psrld(XMMRegister dst, int shift);
2130   void psrlq(XMMRegister dst, int shift);
2131   void psrlw(XMMRegister dst, XMMRegister shift);
2132   void psrld(XMMRegister dst, XMMRegister shift);
2133   void psrlq(XMMRegister dst, XMMRegister shift);
2134   void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2135   void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2136   void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2137   void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2138   void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2139   void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2140 
2141   // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
2142   void psraw(XMMRegister dst, int shift);
2143   void psrad(XMMRegister dst, int shift);
2144   void psraw(XMMRegister dst, XMMRegister shift);
2145   void psrad(XMMRegister dst, XMMRegister shift);
2146   void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2147   void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2148   void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2149   void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2150 
2151   // Variable shift left packed integers
2152   void vpsllvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2153   void vpsllvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2154 
2155   // Variable shift right packed integers
2156   void vpsrlvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2157   void vpsrlvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2158 
2159   // Variable shift right arithmetic packed integers
2160   void vpsravd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2161   void vpsravq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2162 
2163   // And packed integers
2164   void pand(XMMRegister dst, XMMRegister src);
2165   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2166   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2167   void evpandd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2168   void vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2169 
2170   // Andn packed integers
2171   void pandn(XMMRegister dst, XMMRegister src);
2172 
2173   // Or packed integers
2174   void por(XMMRegister dst, XMMRegister src);
2175   void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2176   void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2177   void vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2178 
2179   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2180   void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
2181 
2182   // Xor packed integers
2183   void pxor(XMMRegister dst, XMMRegister src);
2184   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2185   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2186   void vpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2187   void evpxord(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2188 
2189   // vinserti forms
2190   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2191   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2192   void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2193   void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2194   void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2195 
2196   // vinsertf forms
2197   void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2198   void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2199   void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2200   void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2201   void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2202   void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2203 
2204   // vextracti forms
2205   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
2206   void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
2207   void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2208   void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
2209   void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
2210   void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2211 
2212   // vextractf forms
2213   void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
2214   void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
2215   void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2216   void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
2217   void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
2218   void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2219   void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
2220 
2221   // legacy xmm sourced word/dword replicate
2222   void vpbroadcastw(XMMRegister dst, XMMRegister src);
2223   void vpbroadcastd(XMMRegister dst, XMMRegister src);
2224 
2225   // xmm/mem sourced byte/word/dword/qword replicate
2226   void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
2227   void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
2228   void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
2229   void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
2230   void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
2231   void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
2232   void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
2233   void evpbroadcastq(XMMRegister dst, Address src, int vector_len);
2234 
2235   // scalar single/double precision replicate
2236   void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
2237   void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
2238   void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
2239   void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);
2240 
2241   // gpr sourced byte/word/dword/qword replicate
2242   void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
2243   void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
2244   void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
2245   void evpbroadcastq(XMMRegister dst, Register src, int vector_len);
2246 
2247   // Carry-Less Multiplication Quadword
2248   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
2249   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
2250 
2251   // AVX instruction which is used to clear the upper 128 bits of YMM registers and
2252   // to avoid the transition penalty between AVX and SSE states. There is no
2253   // penalty if legacy SSE instructions are encoded using a VEX prefix because
2254   // they always clear the upper 128 bits. It should be used before calling
2255   // runtime code and native libraries.
2256   void vzeroupper();
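       // Illustrative use (a sketch, assuming the usual MacroAssembler "__" shorthand and a
       // hypothetical runtime entry point):
       //   __ vzeroupper();         // discard dirty upper YMM state...
       //   __ call(runtime_entry);  // ...before transferring to SSE-compiled code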
2257 
2258   // Vector double compares
2259   void vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
2260   void evcmppd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2261                ComparisonPredicateFP comparison, int vector_len);
2262 
2263   // Vector float compares
2264   void vcmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int comparison, int vector_len);
2265   void evcmpps(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2266                ComparisonPredicateFP comparison, int vector_len);
2267 
2268   // Vector integer compares
2269   void vpcmpgtd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2270   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2271                int comparison, int vector_len);
2272   void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
2273                int comparison, int vector_len);
2274 
2275   // Vector long compares
2276   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2277                int comparison, int vector_len);
2278   void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
2279                int comparison, int vector_len);
2280 
2281   // Vector byte compares
2282   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2283                int comparison, int vector_len);
2284   void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
2285                int comparison, int vector_len);
2286 
2287   // Vector short compares
2288   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
2289                int comparison, int vector_len);
2290   void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address src,
2291                int comparison, int vector_len);
2292 
2293   // Vector blends
2294   void blendvps(XMMRegister dst, XMMRegister src);
2295   void blendvpd(XMMRegister dst, XMMRegister src);
2296   void pblendvb(XMMRegister dst, XMMRegister src);
2297   void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len);
2298   void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
2299   void vpblendvb(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len);
2300   void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
2301   void evblendmpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2302   void evblendmps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2303   void evpblendmb(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2304   void evpblendmw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2305   void evpblendmd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2306   void evpblendmq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
2307  protected:
2308   // The following instructions require 16-byte address alignment in SSE mode.
2309   // They should be called only from the corresponding MacroAssembler instructions.
2310   void andpd(XMMRegister dst, Address src);
2311   void andps(XMMRegister dst, Address src);
2312   void xorpd(XMMRegister dst, Address src);
2313   void xorps(XMMRegister dst, Address src);
2314 
2315 };
2316 
2317 // The Intel x86/AMD64 assembler attributes: all fields enclosed here guide encoding-level decisions.
2318 // The specific set functions are for specialized use; otherwise the defaults, or whatever was supplied
2319 // at object construction, are applied.
2320 class InstructionAttr {
2321 public:
2322   InstructionAttr(
2323     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
2324     bool rex_vex_w,     // Width of data: false for 32 bits or less, true for 64 bits or specially defined
2325     bool legacy_mode,   // If true, the instruction is encoded with AVX or earlier encodings; otherwise EVEX may be used
2326     bool no_reg_mask,   // When true, k0 is used if EVEX encoding is chosen; otherwise k1 is used
2327     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
2328     :
2329       _avx_vector_len(vector_len),
2330       _rex_vex_w(rex_vex_w),
2331       _rex_vex_w_reverted(false),
2332       _legacy_mode(legacy_mode),
2333       _no_reg_mask(no_reg_mask),
2334       _uses_vl(uses_vl),
2335       _tuple_type(Assembler::EVEX_ETUP),
2336       _input_size_in_bits(Assembler::EVEX_NObit),
2337       _is_evex_instruction(false),
2338       _evex_encoding(0),
2339       _is_clear_context(true),
2340       _is_extended_context(false),
2341       _current_assembler(NULL),
2342       _embedded_opmask_register_specifier(1) { // hard-code k1 as the default opmask register for now
2343     if (UseAVX < 3) _legacy_mode = true;
2344   }
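       // Illustrative construction (a sketch of how instruction emitters typically use this
       // class; the argument values are made up):
       //   InstructionAttr attributes(AVX_128bit, /* rex_vex_w */ false,
       //                              /* legacy_mode */ false, /* no_reg_mask */ false,
       //                              /* uses_vl */ true);
       //   attributes.set_is_evex_instruction();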
2345 
2346   ~InstructionAttr() {
2347     if (_current_assembler != NULL) {
2348       _current_assembler->clear_attributes();
2349     }
2350     _current_assembler = NULL;
2351   }
2352 
2353 private:
2354   int  _avx_vector_len;
2355   bool _rex_vex_w;
2356   bool _rex_vex_w_reverted;
2357   bool _legacy_mode;
2358   bool _no_reg_mask;
2359   bool _uses_vl;
2360   int  _tuple_type;
2361   int  _input_size_in_bits;
2362   bool _is_evex_instruction;
2363   int  _evex_encoding;
2364   bool _is_clear_context;
2365   bool _is_extended_context;
2366   int _embedded_opmask_register_specifier;
2367 
2368   Assembler *_current_assembler;
2369 
2370 public:
2371   // query functions for field accessors
2372   int  get_vector_len(void) const { return _avx_vector_len; }
2373   bool is_rex_vex_w(void) const { return _rex_vex_w; }
2374   bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
2375   bool is_legacy_mode(void) const { return _legacy_mode; }
2376   bool is_no_reg_mask(void) const { return _no_reg_mask; }
2377   bool uses_vl(void) const { return _uses_vl; }
2378   int  get_tuple_type(void) const { return _tuple_type; }
2379   int  get_input_size(void) const { return _input_size_in_bits; }
2380   bool is_evex_instruction(void) const { return _is_evex_instruction; }
2381   int  get_evex_encoding(void) const { return _evex_encoding; }
2382   bool is_clear_context(void) const { return _is_clear_context; }
2383   bool is_extended_context(void) const { return _is_extended_context; }
2384   int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
2385 
2386   // Set the vector len manually
2387   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
2388 
2389   // Mark rex_vex_w as reverted for AVX encoding
2390   void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }
2391 
2392   // Set rex_vex_w based on state
2393   void set_rex_vex_w(bool state) { _rex_vex_w = state; }
2394 
2395   // Set the instruction to be encoded in AVX mode
2396   void set_is_legacy_mode(void) { _legacy_mode = true; }
2397 
2398   // Set the current instruction to be encoded as an EVEX instruction
2399   void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2400 
2401   // Internal encoding data used in compressed immediate offset programming
2402   void set_evex_encoding(int value) { _evex_encoding = value; }
2403 
2404   // When the EVEX.Z field is set (true), destination elements not selected by the opmask are zeroed.
2405   // This method unsets it so that merge semantics are used instead.
2406   void reset_is_clear_context(void) { _is_clear_context = false; }
2407 
2408   // Map back to the current assembler so that we can manage object-level association
2409   void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2410 
2411   // Address modifiers used for compressed displacement calculation
2412   void set_address_attributes(int tuple_type, int input_size_in_bits) {
2413     if (VM_Version::supports_evex()) {
2414       _tuple_type = tuple_type;
2415       _input_size_in_bits = input_size_in_bits;
2416     }
2417   }
2418 
2419   // Set embedded opmask register specifier.
2420   void set_embedded_opmask_register_specifier(KRegister mask) {
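         // Keep only the low three bits: the EVEX.aaa field selects one of the opmask registers k0..k7.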
2421     _embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
2422   }
2423 
2424 };
2425 
2426 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP