1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "utilities/macros.hpp"
  30 #include "runtime/rtmLocking.hpp"
  31 
  32 // MacroAssembler extends Assembler by frequently used macros.
  33 //
  34 // Instructions for which a 'better' code sequence exists depending
  35 // on arguments should also go in here.
  36 
  37 class MacroAssembler: public Assembler {
  38   friend class LIR_Assembler;
  39   friend class Runtime1;      // as_Address()
  40 
  41  public:
  42   // Support for VM calls
  43   //
  44   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  45   // may customize this version by overriding it for its purposes (e.g., to save/restore
  46   // additional registers when doing a VM call).
  47 
  48   virtual void call_VM_leaf_base(
  49     address entry_point,               // the entry point
  50     int     number_of_arguments        // the number of arguments to pop after the call
  51   );
  52 
  53  protected:
  54   // This is the base routine called by the different versions of call_VM. The interpreter
  55   // may customize this version by overriding it for its purposes (e.g., to save/restore
  56   // additional registers when doing a VM call).
  57   //
  58   // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  59   // returns the register which contains the thread upon return. If a thread register has been
  60   // specified, the return value will correspond to that register. If no last_java_sp is specified
  61   // (noreg) then rsp will be used instead.
  62   virtual void call_VM_base(           // returns the register containing the thread upon return
  63     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  64     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  65     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  66     address  entry_point,              // the entry point
  67     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  68     bool     check_exceptions          // whether to check for pending exceptions after return
  69   );
  70 
  71   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  72 
  73   // helpers for FPU flag access
  74   // tmp is a temporary register, if none is available use noreg
  75   void save_rax   (Register tmp);
  76   void restore_rax(Register tmp);
  77 
  78  public:
  79   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  80 
  81   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  82   // The implementation is only non-empty for the InterpreterMacroAssembler,
  83   // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  84   virtual void check_and_handle_popframe(Register java_thread);
  85   virtual void check_and_handle_earlyret(Register java_thread);
  86 
  87   Address as_Address(AddressLiteral adr);
  88   Address as_Address(ArrayAddress adr);
  89 
  90   // Support for NULL-checks
  91   //
  92   // Generates code that causes a NULL OS exception if the content of reg is NULL.
  93   // If the accessed location is M[reg + offset] and the offset is known, provide the
  94   // offset. No explicit code generation is needed if the offset is within a certain
  95   // range (0 <= offset <= page_size).
  96 
  97   void null_check(Register reg, int offset = -1);
  98   static bool needs_explicit_null_check(intptr_t offset);
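       //
       // Illustrative sketch (hypothetical obj_reg/offset, not part of this interface):
       // null_check() itself decides whether an explicit access is needed or whether the
       // subsequent faulting load already provides the implicit NULL check.
       //
       //   __ null_check(obj_reg, offset);               // may emit nothing for small offsets
       //   __ movl(rax, Address(obj_reg, offset));       // the access being guarded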
  99 
 100   // Required platform-specific helpers for Label::patch_instructions.
 101   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 102   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 103     unsigned char op = branch[0];
 104     assert(op == 0xE8 /* call */ ||
 105         op == 0xE9 /* jmp */ ||
 106         op == 0xEB /* short jmp */ ||
 107         (op & 0xF0) == 0x70 /* short jcc */ ||
 108         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 109         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 110         "Invalid opcode at patch point");
 111 
 112     if (op == 0xEB || (op & 0xF0) == 0x70) {
 113       // short offset operators (jmp and jcc)
 114       char* disp = (char*) &branch[1];
 115       int imm8 = target - (address) &disp[1];
 116       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
 117       *disp = imm8;
 118     } else {
 119       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
 120       int imm32 = target - (address) &disp[1];
 121       *disp = imm32;
 122     }
 123   }
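       //
       // Worked example (for illustration): for a 5-byte "jmp rel32" (opcode 0xE9) at
       // address B, the displacement lives at B+1 and is relative to the end of the
       // instruction, so the value written above is  imm32 = target - (B + 5).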
 124 
 125   // The following 4 methods return the offset of the appropriate move instruction
 126 
 127   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 128   int load_unsigned_byte(Register dst, Address src);
 129   int load_unsigned_short(Register dst, Address src);
 130 
 131   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 132   int load_signed_byte(Register dst, Address src);
 133   int load_signed_short(Register dst, Address src);
 134 
 135   // Support for sign-extension (hi:lo = extend_sign(lo))
 136   void extend_sign(Register hi, Register lo);
 137 
 138   // Load and store values by size and signed-ness
 139   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 140   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 141 
 142   // Support for inc/dec with optimal instruction selection depending on value
 143 
 144   void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
 145   void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
 146 
 147   void decrementl(Address dst, int value = 1);
 148   void decrementl(Register reg, int value = 1);
 149 
 150   void decrementq(Register reg, int value = 1);
 151   void decrementq(Address dst, int value = 1);
 152 
 153   void incrementl(Address dst, int value = 1);
 154   void incrementl(Register reg, int value = 1);
 155 
 156   void incrementq(Register reg, int value = 1);
 157   void incrementq(Address dst, int value = 1);
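       //
       // Example (illustrative): the expansion depends on the value (and on UseIncDec),
       // e.g. incrementl(reg, 1) typically emits "incl reg" or "addl reg, 1", while
       // incrementl(reg, 0) emits nothing at all.
       //
       //   __ increment(rbx);          // pointer-sized +1
       //   __ decrementl(rcx, 16);     // 32-bit -16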
 158 
 159   // special instructions for EVEX
 160   void setvectmask(Register dst, Register src);
 161   void restorevectmask();
 162 
 163   // Support optimal SSE move instructions.
 164   void movflt(XMMRegister dst, XMMRegister src) {
 165     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 166     else                       { movss (dst, src); return; }
 167   }
 168   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 169   void movflt(XMMRegister dst, AddressLiteral src);
 170   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 171 
 172   void movdbl(XMMRegister dst, XMMRegister src) {
 173     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 174     else                       { movsd (dst, src); return; }
 175   }
 176 
 177   void movdbl(XMMRegister dst, AddressLiteral src);
 178 
 179   void movdbl(XMMRegister dst, Address src) {
 180     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 181     else                         { movlpd(dst, src); return; }
 182   }
 183   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 184 
 185   void incrementl(AddressLiteral dst);
 186   void incrementl(ArrayAddress dst);
 187 
 188   void incrementq(AddressLiteral dst);
 189 
 190   // Alignment
 191   void align(int modulus);
 192   void align(int modulus, int target);
 193 
 194   // A 5 byte nop that is safe for patching (see patch_verified_entry)
 195   void fat_nop();
 196 
 197   // Stack frame creation/removal
 198   void enter();
 199   void leave();
 200 
 201   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 202   // The pointer will be loaded into the thread register.
 203   void get_thread(Register thread);
 204 
 205 
 206   // Support for VM calls
 207   //
 208   // It is imperative that all calls into the VM are handled via the call_VM macros.
 209   // They make sure that the stack linkage is set up correctly. call_VM's correspond
 210   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 211 
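       // Illustrative sketch (hypothetical entry point, not taken from this file): a VM
       // call that returns an oop in rax and passes one register argument; with the
       // default check_exceptions == true, pending exceptions are checked after the call.
       //
       //   __ call_VM(rax,
       //              CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),   // hypothetical
       //              rbx);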
 212 
 213   void call_VM(Register oop_result,
 214                address entry_point,
 215                bool check_exceptions = true);
 216   void call_VM(Register oop_result,
 217                address entry_point,
 218                Register arg_1,
 219                bool check_exceptions = true);
 220   void call_VM(Register oop_result,
 221                address entry_point,
 222                Register arg_1, Register arg_2,
 223                bool check_exceptions = true);
 224   void call_VM(Register oop_result,
 225                address entry_point,
 226                Register arg_1, Register arg_2, Register arg_3,
 227                bool check_exceptions = true);
 228 
 229   // Overloadings with last_Java_sp
 230   void call_VM(Register oop_result,
 231                Register last_java_sp,
 232                address entry_point,
 233                int number_of_arguments = 0,
 234                bool check_exceptions = true);
 235   void call_VM(Register oop_result,
 236                Register last_java_sp,
 237                address entry_point,
 238                Register arg_1,
 239                bool check_exceptions = true);
 240   void call_VM(Register oop_result,
 241                Register last_java_sp,
 242                address entry_point,
 243                Register arg_1, Register arg_2,
 244                bool check_exceptions = true);
 245   void call_VM(Register oop_result,
 246                Register last_java_sp,
 247                address entry_point,
 248                Register arg_1, Register arg_2, Register arg_3,
 249                bool check_exceptions = true);
 250 
 251   void get_vm_result  (Register oop_result, Register thread);
 252   void get_vm_result_2(Register metadata_result, Register thread);
 253 
 254   // These always tightly bind to MacroAssembler::call_VM_base
 255   // bypassing the virtual implementation
 256   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 257   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 258   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 259   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 260   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 261 
 262   void call_VM_leaf0(address entry_point);
 263   void call_VM_leaf(address entry_point,
 264                     int number_of_arguments = 0);
 265   void call_VM_leaf(address entry_point,
 266                     Register arg_1);
 267   void call_VM_leaf(address entry_point,
 268                     Register arg_1, Register arg_2);
 269   void call_VM_leaf(address entry_point,
 270                     Register arg_1, Register arg_2, Register arg_3);
 271 
 272   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 273   // bypassing the virtual implementation
 274   void super_call_VM_leaf(address entry_point);
 275   void super_call_VM_leaf(address entry_point, Register arg_1);
 276   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 277   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 278   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 279 
 280   // last Java Frame (fills frame anchor)
 281   void set_last_Java_frame(Register thread,
 282                            Register last_java_sp,
 283                            Register last_java_fp,
 284                            address last_java_pc);
 285 
 286   // thread in the default location (r15_thread on 64bit)
 287   void set_last_Java_frame(Register last_java_sp,
 288                            Register last_java_fp,
 289                            address last_java_pc);
 290 
 291   void reset_last_Java_frame(Register thread, bool clear_fp);
 292 
 293   // thread in the default location (r15_thread on 64bit)
 294   void reset_last_Java_frame(bool clear_fp);
 295 
 296   // jobjects
 297   void clear_jweak_tag(Register possibly_jweak);
 298   void resolve_jobject(Register value, Register thread, Register tmp);
 299 
 300   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 301   void c2bool(Register x);
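       //
       // Roughly the emitted sequence (a sketch; see macroAssembler_x86.cpp for the
       // authoritative version):
       //
       //   andl(x, 0xFF);                   // keep only the low byte of the C bool
       //   setb(Assembler::notZero, x);     // x = (x != 0) ? 1 : 0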
 302 
 303   // C++ bool manipulation
 304 
 305   void movbool(Register dst, Address src);
 306   void movbool(Address dst, bool boolconst);
 307   void movbool(Address dst, Register src);
 308   void testbool(Register dst);
 309 
 310   void resolve_oop_handle(Register result, Register tmp = rscratch2);
 311   void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
 312 
 313   // oop manipulations
 314   void load_klass(Register dst, Register src);
 315   void store_klass(Register dst, Register src);
 316 
 317   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 318                       Register tmp1, Register thread_tmp);
 319   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 320                        Register tmp1, Register tmp2);
 321 
 322   // Resolves obj access. Result is placed in the same register.
 323   // All other registers are preserved.
 324   void resolve(DecoratorSet decorators, Register obj);
 325 
 326   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 327                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 328   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 329                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 330   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 331                       Register tmp2 = noreg, DecoratorSet decorators = 0);
 332 
 333   // Used for storing NULL. All other oop constants should be
 334   // stored using routines that take a jobject.
 335   void store_heap_oop_null(Address dst);
 336 
 337   void load_prototype_header(Register dst, Register src);
 338 
 339 #ifdef _LP64
 340   void store_klass_gap(Register dst, Register src);
 341 
 342   // This dummy is to prevent a call to store_heap_oop from
 343   // converting a zero (like NULL) into a Register by giving
 344   // the compiler two choices it can't resolve
 345 
 346   void store_heap_oop(Address dst, void* dummy);
 347 
 348   void encode_heap_oop(Register r);
 349   void decode_heap_oop(Register r);
 350   void encode_heap_oop_not_null(Register r);
 351   void decode_heap_oop_not_null(Register r);
 352   void encode_heap_oop_not_null(Register dst, Register src);
 353   void decode_heap_oop_not_null(Register dst, Register src);
 354 
 355   void set_narrow_oop(Register dst, jobject obj);
 356   void set_narrow_oop(Address dst, jobject obj);
 357   void cmp_narrow_oop(Register dst, jobject obj);
 358   void cmp_narrow_oop(Address dst, jobject obj);
 359 
 360   void encode_klass_not_null(Register r);
 361   void decode_klass_not_null(Register r);
 362   void encode_klass_not_null(Register dst, Register src);
 363   void decode_klass_not_null(Register dst, Register src);
 364   void set_narrow_klass(Register dst, Klass* k);
 365   void set_narrow_klass(Address dst, Klass* k);
 366   void cmp_narrow_klass(Register dst, Klass* k);
 367   void cmp_narrow_klass(Address dst, Klass* k);
 368 
 369   // Returns the byte size of the instructions generated by decode_klass_not_null()
 370   // when compressed klass pointers are being used.
 371   static int instr_size_for_decode_klass_not_null();
 372 
 373   // If the heap base register is used, reinitialize it with the correct value
 374   void reinit_heapbase();
 375 
 376   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 377 
 378 #endif // _LP64
 379 
 380   // Int division/remainder for Java
 381   // (as idivl, but checks for special case as described in JVM spec.)
 382   // returns idivl instruction offset for implicit exception handling
 383   int corrected_idivl(Register reg);
 384 
 385   // Long division/remainder for Java
 386   // (as idivq, but checks for special case as described in JVM spec.)
 387   // returns idivq instruction offset for implicit exception handling
 388   int corrected_idivq(Register reg);
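       //
       // The special case is min_int / -1 (and min_long / -1 for idivq): the true
       // quotient overflows and idiv would raise #DE, while the JVM spec requires the
       // result min_int with remainder 0.  Illustrative use (hypothetical operand):
       //
       //   int idivl_offset = __ corrected_idivl(rcx);   // eax = quotient, edx = remainder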
 389 
 390   void int3();
 391 
 392   // Long operation macros for a 32bit cpu
 393   // Long negation for Java
 394   void lneg(Register hi, Register lo);
 395 
 396   // Long multiplication for Java
 397   // (destroys contents of eax, ebx, ecx and edx)
 398   void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
 399 
 400   // Long shifts for Java
 401   // (semantics as described in JVM spec.)
 402   void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
 403   void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
 404 
 405   // Long compare for Java
 406   // (semantics as described in JVM spec.)
 407   void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
 408 
 409 
 410   // misc
 411 
 412   // Sign extension
 413   void sign_extend_short(Register reg);
 414   void sign_extend_byte(Register reg);
 415 
 416   // Division by power of 2, rounding towards 0
 417   void division_with_shift(Register reg, int shift_value);
 418 
 419   // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
 420   //
 421   // CF (corresponds to C0) if x < y
 422   // PF (corresponds to C2) if unordered
 423   // ZF (corresponds to C3) if x = y
 424   //
 425   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 426   // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
 427   void fcmp(Register tmp);
 428   // Variant of the above which allows y to be further down the stack
 429   // and which only pops x and y if specified. If pop_right is
 430   // specified then pop_left must also be specified.
 431   void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
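       //
       // Illustrative flag test after fcmp (a sketch using the mapping above; labels are
       // hypothetical):
       //
       //   __ fcmp(tmp);
       //   __ jcc(Assembler::parity, L_unordered);   // PF set: NaN operand
       //   __ jcc(Assembler::below,  L_less);        // CF set: x < y
       //   __ jcc(Assembler::equal,  L_equal);       // ZF set: x == y
       //   // fall through: x > y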
 432 
 433   // Floating-point comparison for Java
 434   // Compares the top-most stack entries on the FPU stack and stores the result in dst.
 435   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 436   // (semantics as described in JVM spec.)
 437   void fcmp2int(Register dst, bool unordered_is_less);
 438   // Variant of the above which allows y to be further down the stack
 439   // and which only pops x and y if specified. If pop_right is
 440   // specified then pop_left must also be specified.
 441   void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
 442 
 443   // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
 444   // tmp is a temporary register, if none is available use noreg
 445   void fremr(Register tmp);
 446 
 447   // dst = c = a * b + c
 448   void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 449   void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 450 
 451   void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 452   void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 453   void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 454   void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 455 
 456 
 457   // same as fcmp2int, but using SSE2
 458   void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 459   void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 460 
 461   // branch to L if FPU flag C2 is set/not set
 462   // tmp is a temporary register, if none is available use noreg
 463   void jC2 (Register tmp, Label& L);
 464   void jnC2(Register tmp, Label& L);
 465 
 466   // Pop ST (ffree & fincstp combined)
 467   void fpop();
 468 
 469   // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
 470   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 471   void load_float(Address src);
 472 
 473   // Store float value to 'address'. If UseSSE >= 1, the value is stored
 474   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 475   void store_float(Address dst);
 476 
 477   // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
 478   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 479   void load_double(Address src);
 480 
 481   // Store double value to 'address'. If UseSSE >= 2, the value is stored
 482   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 483   void store_double(Address dst);
 484 
 485   // Save/restore ZMM (512bit) register on stack.
 486   void push_zmm(XMMRegister reg);
 487   void pop_zmm(XMMRegister reg);
 488 
 489   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
 490   void push_fTOS();
 491 
 492   // pops double TOS element from CPU stack and pushes on FPU stack
 493   void pop_fTOS();
 494 
 495   void empty_FPU_stack();
 496 
 497   void push_IU_state();
 498   void pop_IU_state();
 499 
 500   void push_FPU_state();
 501   void pop_FPU_state();
 502 
 503   void push_CPU_state();
 504   void pop_CPU_state();
 505 
 506   // Round up to a multiple of modulus (modulus must be a power of two)
 507   void round_to(Register reg, int modulus);
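       //
       // Numeric example: round_to(reg, 8) computes (reg + 7) & ~7, so 13 -> 16 and 16
       // stays 16.  A sketch of the expansion (see macroAssembler_x86.cpp):
       //
       //   addptr(reg, modulus - 1);
       //   andptr(reg, -modulus);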
 508 
 509   // Callee saved registers handling
 510   void push_callee_saved_registers();
 511   void pop_callee_saved_registers();
 512 
 513   // allocation
 514   void eden_allocate(
 515     Register thread,                   // Current thread
 516     Register obj,                      // result: pointer to object after successful allocation
 517     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 518     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 519     Register t1,                       // temp register
 520     Label&   slow_case                 // continuation point if fast allocation fails
 521   );
 522   void tlab_allocate(
 523     Register thread,                   // Current thread
 524     Register obj,                      // result: pointer to object after successful allocation
 525     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 526     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 527     Register t1,                       // temp register
 528     Register t2,                       // temp register
 529     Label&   slow_case                 // continuation point if fast allocation fails
 530   );
 531   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 532 
 533   // interface method calling
 534   void lookup_interface_method(Register recv_klass,
 535                                Register intf_klass,
 536                                RegisterOrConstant itable_index,
 537                                Register method_result,
 538                                Register scan_temp,
 539                                Label& no_such_interface,
 540                                bool return_method = true);
 541 
 542   // virtual method calling
 543   void lookup_virtual_method(Register recv_klass,
 544                              RegisterOrConstant vtable_index,
 545                              Register method_result);
 546 
 547   // Test sub_klass against super_klass, with fast and slow paths.
 548 
 549   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 550   // One of the three labels can be NULL, meaning take the fall-through.
 551   // If super_check_offset is -1, the value is loaded up from super_klass.
 552   // No registers are killed, except temp_reg.
 553   void check_klass_subtype_fast_path(Register sub_klass,
 554                                      Register super_klass,
 555                                      Register temp_reg,
 556                                      Label* L_success,
 557                                      Label* L_failure,
 558                                      Label* L_slow_path,
 559                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 560 
 561   // The rest of the type check; must be wired to a corresponding fast path.
 562   // It does not repeat the fast path logic, so don't use it standalone.
 563   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 564   // Updates the sub's secondary super cache as necessary.
 565   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 566   void check_klass_subtype_slow_path(Register sub_klass,
 567                                      Register super_klass,
 568                                      Register temp_reg,
 569                                      Register temp2_reg,
 570                                      Label* L_success,
 571                                      Label* L_failure,
 572                                      bool set_cond_codes = false);
 573 
 574   // Simplified, combined version, good for typical uses.
 575   // Falls through on failure.
 576   void check_klass_subtype(Register sub_klass,
 577                            Register super_klass,
 578                            Register temp_reg,
 579                            Label& L_success);
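       //
       // A sketch of how the fast and slow paths are typically wired together (this is
       // essentially what the combined version above does):
       //
       //   Label L_failure;
       //   check_klass_subtype_fast_path(sub, super, temp, &L_success, &L_failure, NULL);
       //   check_klass_subtype_slow_path(sub, super, temp, noreg,      &L_success, NULL);
       //   bind(L_failure);   // falling through to here means "not a subtype"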
 580 
 581   // method handles (JSR 292)
 582   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 583 
 584   //----
 585   void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
 586 
 587   // Debugging
 588 
 589   // only if +VerifyOops
 590   // TODO: Make these macros with file and line like sparc version!
 591   void verify_oop(Register reg, const char* s = "broken oop");
 592   void verify_oop_addr(Address addr, const char * s = "broken oop addr");
 593 
 594   // TODO: verify method and klass metadata (compare against vptr?)
 595   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 596   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 597 
 598 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 599 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 600 
 601   // only if +VerifyFPU
 602   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
 603 
 604   // Verify or restore cpu control state after JNI call
 605   void restore_cpu_control_state_after_jni();
 606 
 607   // prints msg, dumps registers and stops execution
 608   void stop(const char* msg);
 609 
 610   // prints msg and continues
 611   void warn(const char* msg);
 612 
 613   // dumps registers and other state
 614   void print_state();
 615 
 616   static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
 617   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 618   static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
 619   static void print_state64(int64_t pc, int64_t regs[]);
 620 
 621   void os_breakpoint();
 622 
 623   void untested()                                { stop("untested"); }
 624 
 625   void unimplemented(const char* what = "");
 626 
 627   void should_not_reach_here()                   { stop("should not reach here"); }
 628 
 629   void print_CPU_state();
 630 
 631   // Stack overflow checking
 632   void bang_stack_with_offset(int offset) {
 633     // stack grows down, caller passes positive offset
 634     assert(offset > 0, "must bang with negative offset");
 635     movl(Address(rsp, (-offset)), rax);
 636   }
 637 
 638   // Writes to stack successive pages until offset reached to check for
 639   // stack overflow + shadow pages.  Also, clobbers tmp
 640   void bang_stack_size(Register size, Register tmp);
 641 
 642   // Check for reserved stack access in method being exited (for JIT)
 643   void reserved_stack_check();
 644 
 645   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
 646                                                 Register tmp,
 647                                                 int offset);
 648 
 649   // Support for serializing memory accesses between threads
 650   void serialize_memory(Register thread, Register tmp);
 651 
 652   // If thread_reg != noreg, the code assumes the passed register contains
 653   // the thread (required on 64-bit).
 654   void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
 655 
 656   void verify_tlab();
 657 
 658   // Biased locking support
 659   // lock_reg and obj_reg must be loaded up with the appropriate values.
 660   // swap_reg must be rax, and is killed.
 661   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 662   // be killed; if not supplied, push/pop will be used internally to
 663   // allocate a temporary (inefficient, avoid if possible).
 664   // Optional slow case is for implementations (interpreter and C1) which branch to
 665   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 666   // Returns offset of first potentially-faulting instruction for null
 667   // check info (currently consumed only by C1). If
 668   // swap_reg_contains_mark is true then returns -1 as it is assumed
 669   // the calling code has already passed any potential faults.
 670   int biased_locking_enter(Register lock_reg, Register obj_reg,
 671                            Register swap_reg, Register tmp_reg,
 672                            bool swap_reg_contains_mark,
 673                            Label& done, Label* slow_case = NULL,
 674                            BiasedLockingCounters* counters = NULL);
 675   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 676 #ifdef COMPILER2
 677   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
 678   // See full description in macroAssembler_x86.cpp.
 679   void fast_lock(Register obj, Register box, Register tmp,
 680                  Register scr, Register cx1, Register cx2,
 681                  BiasedLockingCounters* counters,
 682                  RTMLockingCounters* rtm_counters,
 683                  RTMLockingCounters* stack_rtm_counters,
 684                  Metadata* method_data,
 685                  bool use_rtm, bool profile_rtm);
 686   void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 687 #if INCLUDE_RTM_OPT
 688   void rtm_counters_update(Register abort_status, Register rtm_counters);
 689   void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
 690   void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
 691                                    RTMLockingCounters* rtm_counters,
 692                                    Metadata* method_data);
 693   void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
 694                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 695   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
 696   void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
 697   void rtm_stack_locking(Register obj, Register tmp, Register scr,
 698                          Register retry_on_abort_count,
 699                          RTMLockingCounters* stack_rtm_counters,
 700                          Metadata* method_data, bool profile_rtm,
 701                          Label& DONE_LABEL, Label& IsInflated);
 702   void rtm_inflated_locking(Register obj, Register box, Register tmp,
 703                             Register scr, Register retry_on_busy_count,
 704                             Register retry_on_abort_count,
 705                             RTMLockingCounters* rtm_counters,
 706                             Metadata* method_data, bool profile_rtm,
 707                             Label& DONE_LABEL);
 708 #endif
 709 #endif
 710 
 711   Condition negate_condition(Condition cond);
 712 
 713   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
 714   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
 715   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 716   // here in MacroAssembler. The major exception to this rule is call.
 717 
 718   // Arithmetics
 719 
 720 
 721   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 722   void addptr(Address dst, Register src);
 723 
 724   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 725   void addptr(Register dst, int32_t src);
 726   void addptr(Register dst, Register src);
 727   void addptr(Register dst, RegisterOrConstant src) {
 728     if (src.is_constant()) addptr(dst, (int) src.as_constant());
 729     else                   addptr(dst,       src.as_register());
 730   }
 731 
 732   void andptr(Register dst, int32_t src);
 733   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
 734 
 735   void cmp8(AddressLiteral src1, int imm);
 736 
 737   // renamed to drag out the casting of address to int32_t/intptr_t
 738   void cmp32(Register src1, int32_t imm);
 739 
 740   void cmp32(AddressLiteral src1, int32_t imm);
 741   // compare reg - mem, or reg - &mem
 742   void cmp32(Register src1, AddressLiteral src2);
 743 
 744   void cmp32(Register src1, Address src2);
 745 
 746 #ifndef _LP64
 747   void cmpklass(Address dst, Metadata* obj);
 748   void cmpklass(Register dst, Metadata* obj);
 749   void cmpoop(Address dst, jobject obj);
 750   void cmpoop_raw(Address dst, jobject obj);
 751 #endif // _LP64
 752 
 753   void cmpoop(Register src1, Register src2);
 754   void cmpoop(Register src1, Address src2);
 755   void cmpoop(Register dst, jobject obj);
 756   void cmpoop_raw(Register dst, jobject obj);
 757 
 758   // NOTE: src2 must be the lval. This is NOT a mem-mem compare
 759   void cmpptr(Address src1, AddressLiteral src2);
 760 
 761   void cmpptr(Register src1, AddressLiteral src2);
 762 
 763   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 764   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 765   // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 766 
 767   void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 768   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 769 
 770   // cmp64 to avoid hiding cmpq
 771   void cmp64(Register src1, AddressLiteral src);
 772 
 773   void cmpxchgptr(Register reg, Address adr);
 774 
 775   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 776 
 777 
 778   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
 779   void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 780 
 781 
 782   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 783 
 784   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
 785 
 786   void shlptr(Register dst, int32_t shift);
 787   void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
 788 
 789   void shrptr(Register dst, int32_t shift);
 790   void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
 791 
 792   void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
 793   void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
 794 
 795   void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 796 
 797   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 798   void subptr(Register dst, int32_t src);
 799   // Force generation of a 4-byte immediate value even if it fits into 8 bits
 800   void subptr_imm32(Register dst, int32_t src);
 801   void subptr(Register dst, Register src);
 802   void subptr(Register dst, RegisterOrConstant src) {
 803     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 804     else                   subptr(dst,       src.as_register());
 805   }
 806 
 807   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 808   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 809 
 810   void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 811   void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 812 
 813   void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
 814 
 815 
 816 
 817   // Helper functions for statistics gathering.
 818   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 819   void cond_inc32(Condition cond, AddressLiteral counter_addr);
 820   // Unconditional atomic increment.
 821   void atomic_incl(Address counter_addr);
 822   void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
 823 #ifdef _LP64
 824   void atomic_incq(Address counter_addr);
 825   void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
 826 #endif
 827   void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
 828   void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
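       //
       // Conceptually (a sketch), atomicity comes from a lock prefix, e.g. atomic_incl(addr)
       // behaves like:
       //
       //   lock();
       //   incrementl(counter_addr);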
 829 
 830   void lea(Register dst, AddressLiteral adr);
 831   void lea(Address dst, AddressLiteral adr);
 832   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 833 
 834   void leal32(Register dst, Address src) { leal(dst, src); }
 835 
 836   // Import other testl() methods from the parent class or else
 837   // they will be hidden by the following overriding declaration.
 838   using Assembler::testl;
 839   void testl(Register dst, AddressLiteral src);
 840 
 841   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 842   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 843   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 844   void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 845 
 846   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 847   void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
 848   void testptr(Register src1, Register src2);
 849 
 850   void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 851   void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 852 
 853   // Calls
 854 
 855   void call(Label& L, relocInfo::relocType rtype);
 856   void call(Register entry);
 857 
 858   // NOTE: this call transfers to the effective address of entry, NOT
 859   // the address contained by entry, because that is more natural
 860   // for jumps/calls.
 861   void call(AddressLiteral entry);
 862 
 863   // Emit the CompiledIC call idiom
 864   void ic_call(address entry, jint method_index = 0);
 865 
 866   // Jumps
 867 
 868   // NOTE: these jumps transfer to the effective address of dst, NOT
 869   // the address contained by dst, because that is more natural
 870   // for jumps/calls.
 871   void jump(AddressLiteral dst);
 872   void jump_cc(Condition cc, AddressLiteral dst);
 873 
 874   // 32-bit can do a case table jump in one instruction, but we no longer allow the base
 875   // to be installed in the Address class. This jump transfers to the address
 876   // contained in the location described by entry (not the address of entry).
 877   void jump(ArrayAddress entry);
 878 
 879   // Floating
 880 
 881   void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
 882   void andpd(XMMRegister dst, AddressLiteral src);
 883   void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
 884 
 885   void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
 886   void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
 887   void andps(XMMRegister dst, AddressLiteral src);
 888 
 889   void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
 890   void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
 891   void comiss(XMMRegister dst, AddressLiteral src);
 892 
 893   void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
 894   void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
 895   void comisd(XMMRegister dst, AddressLiteral src);
 896 
 897   void fadd_s(Address src)        { Assembler::fadd_s(src); }
 898   void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
 899 
 900   void fldcw(Address src) { Assembler::fldcw(src); }
 901   void fldcw(AddressLiteral src);
 902 
 903   void fld_s(int index)   { Assembler::fld_s(index); }
 904   void fld_s(Address src) { Assembler::fld_s(src); }
 905   void fld_s(AddressLiteral src);
 906 
 907   void fld_d(Address src) { Assembler::fld_d(src); }
 908   void fld_d(AddressLiteral src);
 909 
 910   void fld_x(Address src) { Assembler::fld_x(src); }
 911   void fld_x(AddressLiteral src);
 912 
 913   void fmul_s(Address src)        { Assembler::fmul_s(src); }
 914   void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
 915 
 916   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
 917   void ldmxcsr(AddressLiteral src);
 918 
 919 #ifdef _LP64
 920  private:
 921   void sha256_AVX2_one_round_compute(
 922     Register  reg_old_h,
 923     Register  reg_a,
 924     Register  reg_b,
 925     Register  reg_c,
 926     Register  reg_d,
 927     Register  reg_e,
 928     Register  reg_f,
 929     Register  reg_g,
 930     Register  reg_h,
 931     int iter);
 932   void sha256_AVX2_four_rounds_compute_first(int start);
 933   void sha256_AVX2_four_rounds_compute_last(int start);
 934   void sha256_AVX2_one_round_and_sched(
 935         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
 936         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
 937         XMMRegister xmm_2,     /* ymm6 */
 938         XMMRegister xmm_3,     /* ymm7 */
 939         Register    reg_a,      /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
 940         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
 941         Register    reg_c,      /* edi */
 942         Register    reg_d,      /* esi */
 943         Register    reg_e,      /* r8d */
 944         Register    reg_f,      /* r9d */
 945         Register    reg_g,      /* r10d */
 946         Register    reg_h,      /* r11d */
 947         int iter);
 948 
 949   void addm(int disp, Register r1, Register r2);
 950 
 951  public:
 952   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 953                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 954                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 955                    bool multi_block, XMMRegister shuf_mask);
 956 #endif
 957 
 958 #ifdef _LP64
 959  private:
 960   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
 961                                      Register e, Register f, Register g, Register h, int iteration);
 962 
 963   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
 964                                           Register a, Register b, Register c, Register d, Register e, Register f,
 965                                           Register g, Register h, int iteration);
 966 
 967   void addmq(int disp, Register r1, Register r2);
 968  public:
 969   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 970                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 971                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
 972                    XMMRegister shuf_mask);
 973 #endif
 974 
 975   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
 976                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
 977                  Register buf, Register state, Register ofs, Register limit, Register rsp,
 978                  bool multi_block);
 979 
 980 #ifdef _LP64
 981   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 982                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 983                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 984                    bool multi_block, XMMRegister shuf_mask);
 985 #else
 986   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 987                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 988                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 989                    bool multi_block);
 990 #endif
 991 
 992   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
 993                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
 994                 Register rax, Register rcx, Register rdx, Register tmp);
 995 
 996 #ifdef _LP64
 997   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
 998                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
 999                 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
1000 
1001   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1002                   XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1003                   Register rax, Register rcx, Register rdx, Register r11);
1004 
1005   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1006                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1007                 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
1008 
1009   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1010                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1011                 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
1012                 Register tmp3, Register tmp4);
1013 
1014   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1015                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1016                 Register rax, Register rcx, Register rdx, Register tmp1,
1017                 Register tmp2, Register tmp3, Register tmp4);
1018   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1019                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1020                 Register rax, Register rcx, Register rdx, Register tmp1,
1021                 Register tmp2, Register tmp3, Register tmp4);
1022 #else
1023   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1024                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1025                 Register rax, Register rcx, Register rdx, Register tmp1);
1026 
1027   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1028                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1029                 Register rax, Register rcx, Register rdx, Register tmp);
1030 
1031   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1032                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1033                 Register rdx, Register tmp);
1034 
1035   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1036                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1037                 Register rax, Register rbx, Register rdx);
1038 
1039   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1040                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1041                 Register rax, Register rcx, Register rdx, Register tmp);
1042 
1043   void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1044                         Register edx, Register ebx, Register esi, Register edi,
1045                         Register ebp, Register esp);
1046 
1047   void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1048                          Register esi, Register edi, Register ebp, Register esp);
1049 
1050   void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1051                         Register edx, Register ebx, Register esi, Register edi,
1052                         Register ebp, Register esp);
1053 
1054   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1055                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1056                 Register rax, Register rcx, Register rdx, Register tmp);
1057 #endif
1058 
1059   void increase_precision();
1060   void restore_precision();
1061 
1062 private:
1063 
1064   // these are private because users should be doing movflt/movdbl
1065 
1066   void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
1067   void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1068   void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
1069   void movss(XMMRegister dst, AddressLiteral src);
1070 
1071   void movlpd(XMMRegister dst, Address src)    {Assembler::movlpd(dst, src); }
1072   void movlpd(XMMRegister dst, AddressLiteral src);
1073 
1074 public:
1075 
1076   void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
1077   void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
1078   void addsd(XMMRegister dst, AddressLiteral src);
1079 
1080   void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
1081   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1082   void addss(XMMRegister dst, AddressLiteral src);
1083 
1084   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1085   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1086   void addpd(XMMRegister dst, AddressLiteral src);
1087 
1088   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1089   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1090   void divsd(XMMRegister dst, AddressLiteral src);
1091 
1092   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1093   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1094   void divss(XMMRegister dst, AddressLiteral src);
1095 
1096   // Move Unaligned Double Quadword
1097   void movdqu(Address     dst, XMMRegister src);
1098   void movdqu(XMMRegister dst, Address src);
1099   void movdqu(XMMRegister dst, XMMRegister src);
1100   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1101   // AVX Unaligned forms
1102   void vmovdqu(Address     dst, XMMRegister src);
1103   void vmovdqu(XMMRegister dst, Address src);
1104   void vmovdqu(XMMRegister dst, XMMRegister src);
1105   void vmovdqu(XMMRegister dst, AddressLiteral src);
1106   void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1107   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1108   void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1109   void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1110 
1111   // Move Aligned Double Quadword
1112   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1113   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1114   void movdqa(XMMRegister dst, AddressLiteral src);
1115 
1116   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1117   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1118   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1119   void movsd(XMMRegister dst, AddressLiteral src);
1120 
1121   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1122   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1123   void mulpd(XMMRegister dst, AddressLiteral src);
1124 
1125   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1126   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1127   void mulsd(XMMRegister dst, AddressLiteral src);
1128 
1129   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
1130   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
1131   void mulss(XMMRegister dst, AddressLiteral src);
1132 
1133   // Carry-Less Multiplication Quadword
1134   void pclmulldq(XMMRegister dst, XMMRegister src) {
1135     // 0x00 - multiply lower 64 bits [0:63]
1136     Assembler::pclmulqdq(dst, src, 0x00);
1137   }
1138   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1139     // 0x11 - multiply upper 64 bits [64:127]
1140     Assembler::pclmulqdq(dst, src, 0x11);
1141   }
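       // (For reference: in the pclmulqdq immediate, bit 0 selects the low or
       //  high qword of the first operand and bit 4 selects that of the second,
       //  so 0x00 multiplies the two low qwords and 0x11 the two high qwords.)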
1142 
1143   void pcmpeqb(XMMRegister dst, XMMRegister src);
1144   void pcmpeqw(XMMRegister dst, XMMRegister src);
1145 
1146   void pcmpestri(XMMRegister dst, Address src, int imm8);
1147   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1148 
1149   void pmovzxbw(XMMRegister dst, XMMRegister src);
1150   void pmovzxbw(XMMRegister dst, Address src);
1151 
1152   void pmovmskb(Register dst, XMMRegister src);
1153 
1154   void ptest(XMMRegister dst, XMMRegister src);
1155 
1156   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
1157   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
1158   void sqrtsd(XMMRegister dst, AddressLiteral src);
1159 
1160   void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
1161   void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
1162   void sqrtss(XMMRegister dst, AddressLiteral src);
1163 
1164   void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
1165   void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
1166   void subsd(XMMRegister dst, AddressLiteral src);
1167 
1168   void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
1169   void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
1170   void subss(XMMRegister dst, AddressLiteral src);
1171 
1172   void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1173   void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
1174   void ucomiss(XMMRegister dst, AddressLiteral src);
1175 
1176   void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1177   void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
1178   void ucomisd(XMMRegister dst, AddressLiteral src);
1179 
1180   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1181   void xorpd(XMMRegister dst, XMMRegister src);
1182   void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
1183   void xorpd(XMMRegister dst, AddressLiteral src);
1184 
1185   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1186   void xorps(XMMRegister dst, XMMRegister src);
1187   void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
1188   void xorps(XMMRegister dst, AddressLiteral src);
1189 
1190   // Shuffle Bytes
1191   void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1192   void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
1193   void pshufb(XMMRegister dst, AddressLiteral src);
1194   // AVX 3-operands instructions
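       // ('vector_len' in the declarations below selects the encoded vector
       //  width, normally one of Assembler::AVX_128bit, AVX_256bit or
       //  AVX_512bit.)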
1195 
1196   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1197   void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
1198   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1199 
1200   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1201   void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
1202   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1203 
1204   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1205   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1206 
1207   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1208   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1209 
1210   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1211   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1212 
1213   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1214   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1215   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1216 
1217   void vpbroadcastw(XMMRegister dst, XMMRegister src);
1218 
1219   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1220   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1221 
1222   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1223   void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1224 
1225   void vpmovmskb(Register dst, XMMRegister src);
1226 
1227   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1228   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1229 
1230   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1231   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1232 
1233   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1234   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1235 
1236   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1237   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1238 
1239   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1240   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1241 
1242   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1243   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1244 
1245   void vptest(XMMRegister dst, XMMRegister src);
1246 
1247   void punpcklbw(XMMRegister dst, XMMRegister src);
1248   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1249 
1250   void pshufd(XMMRegister dst, Address src, int mode);
1251   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1252 
1253   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1254   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1255 
1256   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1257   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1258   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1259 
1260   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1261   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1262   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1263 
1264   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1265   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1266   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1267 
1268   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1269   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1270   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1271 
1272   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1273   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1274   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1275 
1276   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1277   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1278   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1279 
1280   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1281   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1282   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1283 
1284   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1285   void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
1286   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1287 
1288   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1289   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1290 
1291   // AVX Vector instructions
1292 
1293   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1294   void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1295   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1296 
1297   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1298   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1299   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1300 
1301   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1302     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1303       Assembler::vpxor(dst, nds, src, vector_len);
1304     else
1305       Assembler::vxorpd(dst, nds, src, vector_len);
1306   }
1307   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1308     if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1309       Assembler::vpxor(dst, nds, src, vector_len);
1310     else
1311       Assembler::vxorpd(dst, nds, src, vector_len);
1312   }
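       // (The vxorpd fallback above is safe because XOR is purely bitwise, so
       //  the floating-point form produces the same bits on AVX1-only hardware,
       //  where the 256-bit integer form is unavailable.)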
1313 
1314   // Simple version for AVX2 256bit vectors
1315   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, Assembler::AVX_256bit); }
1316   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, Assembler::AVX_256bit); }
1317 
1318   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1319     if (UseAVX > 2) {
1320       Assembler::vinserti32x4(dst, dst, src, imm8);
1321     } else if (UseAVX > 1) {
1322       // vinserti128 is available only in AVX2
1323       Assembler::vinserti128(dst, nds, src, imm8);
1324     } else {
1325       Assembler::vinsertf128(dst, nds, src, imm8);
1326     }
1327   }
1328 
1329   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1330     if (UseAVX > 2) {
1331       Assembler::vinserti32x4(dst, dst, src, imm8);
1332     } else if (UseAVX > 1) {
1333       // vinserti128 is available only in AVX2
1334       Assembler::vinserti128(dst, nds, src, imm8);
1335     } else {
1336       Assembler::vinsertf128(dst, nds, src, imm8);
1337     }
1338   }
1339 
1340   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1341     if (UseAVX > 2) {
1342       Assembler::vextracti32x4(dst, src, imm8);
1343     } else if (UseAVX > 1) {
1344       // vextracti128 is available only in AVX2
1345       Assembler::vextracti128(dst, src, imm8);
1346     } else {
1347       Assembler::vextractf128(dst, src, imm8);
1348     }
1349   }
1350 
1351   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1352     if (UseAVX > 2) {
1353       Assembler::vextracti32x4(dst, src, imm8);
1354     } else if (UseAVX > 1) {
1355       // vextracti128 is available only in AVX2
1356       Assembler::vextracti128(dst, src, imm8);
1357     } else {
1358       Assembler::vextractf128(dst, src, imm8);
1359     }
1360   }
1361 
1362   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1363   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1364     vinserti128(dst, dst, src, 1);
1365   }
1366   void vinserti128_high(XMMRegister dst, Address src) {
1367     vinserti128(dst, dst, src, 1);
1368   }
1369   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1370     vextracti128(dst, src, 1);
1371   }
1372   void vextracti128_high(Address dst, XMMRegister src) {
1373     vextracti128(dst, src, 1);
1374   }
1375 
1376   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1377     if (UseAVX > 2) {
1378       Assembler::vinsertf32x4(dst, dst, src, 1);
1379     } else {
1380       Assembler::vinsertf128(dst, dst, src, 1);
1381     }
1382   }
1383 
1384   void vinsertf128_high(XMMRegister dst, Address src) {
1385     if (UseAVX > 2) {
1386       Assembler::vinsertf32x4(dst, dst, src, 1);
1387     } else {
1388       Assembler::vinsertf128(dst, dst, src, 1);
1389     }
1390   }
1391 
1392   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1393     if (UseAVX > 2) {
1394       Assembler::vextractf32x4(dst, src, 1);
1395     } else {
1396       Assembler::vextractf128(dst, src, 1);
1397     }
1398   }
1399 
1400   void vextractf128_high(Address dst, XMMRegister src) {
1401     if (UseAVX > 2) {
1402       Assembler::vextractf32x4(dst, src, 1);
1403     } else {
1404       Assembler::vextractf128(dst, src, 1);
1405     }
1406   }
1407 
1408   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1409   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1410     Assembler::vinserti64x4(dst, dst, src, 1);
1411   }
1412   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1413     Assembler::vinsertf64x4(dst, dst, src, 1);
1414   }
1415   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1416     Assembler::vextracti64x4(dst, src, 1);
1417   }
1418   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1419     Assembler::vextractf64x4(dst, src, 1);
1420   }
1421   void vextractf64x4_high(Address dst, XMMRegister src) {
1422     Assembler::vextractf64x4(dst, src, 1);
1423   }
1424   void vinsertf64x4_high(XMMRegister dst, Address src) {
1425     Assembler::vinsertf64x4(dst, dst, src, 1);
1426   }
1427 
1428   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1429   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1430     vinserti128(dst, dst, src, 0);
1431   }
1432   void vinserti128_low(XMMRegister dst, Address src) {
1433     vinserti128(dst, dst, src, 0);
1434   }
1435   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1436     vextracti128(dst, src, 0);
1437   }
1438   void vextracti128_low(Address dst, XMMRegister src) {
1439     vextracti128(dst, src, 0);
1440   }
1441 
1442   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1443     if (UseAVX > 2) {
1444       Assembler::vinsertf32x4(dst, dst, src, 0);
1445     } else {
1446       Assembler::vinsertf128(dst, dst, src, 0);
1447     }
1448   }
1449 
1450   void vinsertf128_low(XMMRegister dst, Address src) {
1451     if (UseAVX > 2) {
1452       Assembler::vinsertf32x4(dst, dst, src, 0);
1453     } else {
1454       Assembler::vinsertf128(dst, dst, src, 0);
1455     }
1456   }
1457 
1458   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1459     if (UseAVX > 2) {
1460       Assembler::vextractf32x4(dst, src, 0);
1461     } else {
1462       Assembler::vextractf128(dst, src, 0);
1463     }
1464   }
1465 
1466   void vextractf128_low(Address dst, XMMRegister src) {
1467     if (UseAVX > 2) {
1468       Assembler::vextractf32x4(dst, src, 0);
1469     } else {
1470       Assembler::vextractf128(dst, src, 0);
1471     }
1472   }
1473 
1474   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1475   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1476     Assembler::vinserti64x4(dst, dst, src, 0);
1477   }
1478   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1479     Assembler::vinsertf64x4(dst, dst, src, 0);
1480   }
1481   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1482     Assembler::vextracti64x4(dst, src, 0);
1483   }
1484   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1485     Assembler::vextractf64x4(dst, src, 0);
1486   }
1487   void vextractf64x4_low(Address dst, XMMRegister src) {
1488     Assembler::vextractf64x4(dst, src, 0);
1489   }
1490   void vinsertf64x4_low(XMMRegister dst, Address src) {
1491     Assembler::vinsertf64x4(dst, dst, src, 0);
1492   }
1493 
1494   // Carry-Less Multiplication Quadword
1495   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1496     // 0x00 - multiply lower 64 bits [0:63]
1497     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1498   }
1499   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1500     // 0x11 - multiply upper 64 bits [64:127]
1501     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1502   }
1503   void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1504     // 0x00 - multiply lower 64 bits [0:63]
1505     Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1506   }
1507   void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1508     // 0x11 - multiply upper 64 bits [64:127]
1509     Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1510   }
1511 
1512   // Data
1513 
1514   void cmov32( Condition cc, Register dst, Address  src);
1515   void cmov32( Condition cc, Register dst, Register src);
1516 
1517   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1518 
1519   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1520   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
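       // (LP64_ONLY/NOT_LP64 come from utilities/macros.hpp, included above:
       //  depending on _LP64 one expands to its argument and the other to
       //  nothing, so cmovptr becomes cmovq on 64-bit and cmov32 on 32-bit.)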
1521 
1522   void movoop(Register dst, jobject obj);
1523   void movoop(Address dst, jobject obj);
1524 
1525   void mov_metadata(Register dst, Metadata* obj);
1526   void mov_metadata(Address dst, Metadata* obj);
1527 
1528   void movptr(ArrayAddress dst, Register src);
1529   // can this do an lea?
1530   void movptr(Register dst, ArrayAddress src);
1531 
1532   void movptr(Register dst, Address src);
1533 
1534 #ifdef _LP64
1535   void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1536 #else
1537   void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1538 #endif
1539 
1540   void movptr(Register dst, intptr_t src);
1541   void movptr(Register dst, Register src);
1542   void movptr(Address dst, intptr_t src);
1543 
1544   void movptr(Address dst, Register src);
1545 
1546   void movptr(Register dst, RegisterOrConstant src) {
1547     if (src.is_constant()) movptr(dst, src.as_constant());
1548     else                   movptr(dst, src.as_register());
1549   }
1550 
1551 #ifdef _LP64
1552   // Generally the next two are only used for moving NULL, although there
1553   // are situations when initializing the mark word where they could be
1554   // used. They are dangerous.
1555 
1556   // They only exist on LP64, where int32_t and intptr_t are distinct types;
1557   // on 32-bit the two would be identical and make these declarations ambiguous.
1558 
1559   void movptr(Address dst, int32_t imm32);
1560   void movptr(Register dst, int32_t imm32);
1561 #endif // _LP64
1562 
1563   // to avoid hiding movl
1564   void mov32(AddressLiteral dst, Register src);
1565   void mov32(Register dst, AddressLiteral src);
1566 
1567   // to avoid hiding movb
1568   void movbyte(ArrayAddress dst, int src);
1569 
1570   // Import other mov() methods from the parent class, or else
1571   // they will be hidden by the following overriding declarations.
1572   using Assembler::movdl;
1573   using Assembler::movq;
1574   void movdl(XMMRegister dst, AddressLiteral src);
1575   void movq(XMMRegister dst, AddressLiteral src);
1576 
1577   // Can push value or effective address
1578   void pushptr(AddressLiteral src);
1579 
1580   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1581   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1582 
1583   void pushoop(jobject obj);
1584   void pushklass(Metadata* obj);
1585 
1586   // sign-extend a 32-bit ('l') value to a pointer-sized element as needed
1587   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1588   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1589 
1590   // C2 compiled method's prolog code.
1591   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
1592 
1593   // clear memory of size 'cnt' qwords, starting at 'base';
1594   // if 'is_large' is set, do not try to produce a short loop
1595   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);
1596 
1597   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1598   void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);
1599 
1600 #ifdef COMPILER2
1601   void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
1602                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
1603 
1604   // IndexOf for strings.
1605   // Small strings are loaded through the stack if they cross a page boundary.
1606   void string_indexof(Register str1, Register str2,
1607                       Register cnt1, Register cnt2,
1608                       int int_cnt2,  Register result,
1609                       XMMRegister vec, Register tmp,
1610                       int ae);
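       // ('ae' here and in the declarations below selects the argument
       //  encoding, i.e. whether each string is Latin-1 or UTF-16; C2 passes
       //  one of the StrIntrinsicNode encodings such as LL, UL, LU or UU.)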
1611 
1612   // IndexOf for constant substrings with size >= 8 elements
1613   // which don't need to be loaded through the stack.
1614   void string_indexofC8(Register str1, Register str2,
1615                       Register cnt1, Register cnt2,
1616                       int int_cnt2,  Register result,
1617                       XMMRegister vec, Register tmp,
1618                       int ae);
1619 
1620   // Smallest code: no need to load through the stack;
1621   // just check the string tail.
1622 
1623   // helper function for string_compare
1624   void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
1625                           Address::ScaleFactor scale, Address::ScaleFactor scale1,
1626                           Address::ScaleFactor scale2, Register index, int ae);
1627   // Compare strings.
1628   void string_compare(Register str1, Register str2,
1629                       Register cnt1, Register cnt2, Register result,
1630                       XMMRegister vec1, int ae);
1631 
1632   // Search for a non-ASCII character (negative byte value) in a byte array,
1633   // setting 'result' to true if one is found and false otherwise.
1634   void has_negatives(Register ary1, Register len,
1635                      Register result, Register tmp1,
1636                      XMMRegister vec1, XMMRegister vec2);
1637 
1638   // Compare char[] or byte[] arrays.
1639   void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
1640                      Register limit, Register result, Register chr,
1641                      XMMRegister vec1, XMMRegister vec2, bool is_char);
1642 
1643 #endif
1644 
1645   // Fill primitive arrays
1646   void generate_fill(BasicType t, bool aligned,
1647                      Register to, Register value, Register count,
1648                      Register rtmp, XMMRegister xtmp);
1649 
1650   void encode_iso_array(Register src, Register dst, Register len,
1651                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1652                         XMMRegister tmp4, Register tmp5, Register result);
1653 
1654 #ifdef _LP64
1655   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1656   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1657                              Register y, Register y_idx, Register z,
1658                              Register carry, Register product,
1659                              Register idx, Register kdx);
1660   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1661                               Register yz_idx, Register idx,
1662                               Register carry, Register product, int offset);
1663   void multiply_128_x_128_bmi2_loop(Register y, Register z,
1664                                     Register carry, Register carry2,
1665                                     Register idx, Register jdx,
1666                                     Register yz_idx1, Register yz_idx2,
1667                                     Register tmp, Register tmp3, Register tmp4);
1668   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1669                                Register yz_idx, Register idx, Register jdx,
1670                                Register carry, Register product,
1671                                Register carry2);
1672   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1673                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1674   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1675                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1676   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1677                             Register tmp2);
1678   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1679                        Register rdxReg, Register raxReg);
1680   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1681   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1682                        Register tmp3, Register tmp4);
1683   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1684                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1685 
1686   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1687                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1688                Register raxReg);
1689   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1690                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1691                Register raxReg);
1692   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1693                            Register result, Register tmp1, Register tmp2,
1694                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1695 #endif
1696 
1697   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1698   void update_byte_crc32(Register crc, Register val, Register table);
1699   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1700   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1701   // Note on a naming convention:
1702   // Prefix w = register only used on a Westmere+ architecture
1703   // Prefix n = register only used on a Nehalem architecture
1704 #ifdef _LP64
1705   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1706                        Register tmp1, Register tmp2, Register tmp3);
1707 #else
1708   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1709                        Register tmp1, Register tmp2, Register tmp3,
1710                        XMMRegister xtmp1, XMMRegister xtmp2);
1711 #endif
1712   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1713                         Register in_out,
1714                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1715                         XMMRegister w_xtmp2,
1716                         Register tmp1,
1717                         Register n_tmp2, Register n_tmp3);
1718   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1719                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1720                        Register tmp1, Register tmp2,
1721                        Register n_tmp3);
1722   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1723                          Register in_out1, Register in_out2, Register in_out3,
1724                          Register tmp1, Register tmp2, Register tmp3,
1725                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1726                          Register tmp4, Register tmp5,
1727                          Register n_tmp6);
1728   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1729                             Register tmp1, Register tmp2, Register tmp3,
1730                             Register tmp4, Register tmp5, Register tmp6,
1731                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1732                             bool is_pclmulqdq_supported);
1733   // Fold 128-bit data chunk
1734   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1735   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1736   // Fold 8-bit data
1737   void fold_8bit_crc32(Register crc, Register table, Register tmp);
1738   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1739   void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1740 
1741   // Compress char[] array to byte[].
1742   void char_array_compress(Register src, Register dst, Register len,
1743                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1744                            XMMRegister tmp4, Register tmp5, Register result);
1745 
1746   // Inflate byte[] array to char[].
1747   void byte_array_inflate(Register src, Register dst, Register len,
1748                           XMMRegister tmp1, Register tmp2);
1749 
1750 };
1751 
1752 /**
1753  * class SkipIfEqual:
1754  *
1755  * Instantiating this class emits assembly code that will jump around any
1756  * code generated between the creation of the instance and its automatic
1757  * destruction at the end of the enclosing scope block, depending on the
1758  * run-time value of the flag passed to the constructor.
1759  */
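     //
     // A minimal usage sketch ('masm' and 'SomeBoolFlag' are placeholders for a
     // real MacroAssembler* and a real VM flag):
     //
     //   {
     //     SkipIfEqual skip(masm, &SomeBoolFlag, false);
     //     // code emitted here is jumped over at run time when SomeBoolFlag == false
     //   }   // the destructor binds the label; later code runs unconditionally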
1760 class SkipIfEqual {
1761  private:
1762   MacroAssembler* _masm;
1763   Label _label;
1764 
1765  public:
1766   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1767   ~SkipIfEqual();
1768 };
1769 
1770 #endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP