1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "utilities/macros.hpp"
  30 #include "runtime/rtmLocking.hpp"
  31 
  32 // MacroAssembler extends Assembler by frequently used macros.
  33 //
  34 // Instructions for which a 'better' code sequence exists depending
  35 // on arguments should also go in here.
  36 
  37 class MacroAssembler: public Assembler {
  38   friend class LIR_Assembler;
  39   friend class Runtime1;      // as_Address()
  40 
  41  protected:
  42 
  43   Address as_Address(AddressLiteral adr);
  44   Address as_Address(ArrayAddress adr);
  45 
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).
  51 
  52   virtual void call_VM_leaf_base(
  53     address entry_point,               // the entry point
  54     int     number_of_arguments        // the number of arguments to pop after the call
  55   );
  56 
  57   // This is the base routine called by the different versions of call_VM. The interpreter
  58   // may customize this version by overriding it for its purposes (e.g., to save/restore
  59   // additional registers when doing a VM call).
  60   //
  61   // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  62   // returns the register which contains the thread upon return. If a thread register has been
  63   // specified, the return value will correspond to that register. If no last_java_sp is specified
  64   // (noreg) then rsp will be used instead.
  65   virtual void call_VM_base(           // returns the register containing the thread upon return
  66     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  67     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  68     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  69     address  entry_point,              // the entry point
  70     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  71     bool     check_exceptions          // whether to check for pending exceptions after return
  72   );
  73 
  74   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  75 
  76   // helpers for FPU flag access
  77   // tmp is a temporary register, if none is available use noreg
  78   void save_rax   (Register tmp);
  79   void restore_rax(Register tmp);
  80 
  81  public:
  82   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  83 
  84   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  85   // The implementation is only non-empty for the InterpreterMacroAssembler,
  86   // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  87   virtual void check_and_handle_popframe(Register java_thread);
  88   virtual void check_and_handle_earlyret(Register java_thread);
  89 
  90   // Support for NULL-checks
  91   //
  92   // Generates code that causes a NULL OS exception if the content of reg is NULL.
  93   // If the accessed location is M[reg + offset] and the offset is known, provide the
  94   // offset. No explicit code generation is needed if the offset is within a certain
  95   // range (0 <= offset <= page_size).
  96 
  97   void null_check(Register reg, int offset = -1);
  98   static bool needs_explicit_null_check(intptr_t offset);
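
  // Illustrative sketch (not code from this file; register and offset are placeholders):
  // a load at a small, statically known offset can rely on the hardware trap, e.g.
  //   null_check(rcx, oopDesc::klass_offset_in_bytes());   // emits nothing
  // while the default offset of -1 makes null_check emit an explicit access through
  // the register, so the trap is provoked here rather than at the later use.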
  99 
 100   // Required platform-specific helpers for Label::patch_instructions.
 101   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 102   void pd_patch_instruction(address branch, address target) {
 103     unsigned char op = branch[0];
 104     assert(op == 0xE8 /* call */ ||
 105         op == 0xE9 /* jmp */ ||
 106         op == 0xEB /* short jmp */ ||
 107         (op & 0xF0) == 0x70 /* short jcc */ ||
 108         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 109         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
 110         "Invalid opcode at patch point");
 111 
 112     if (op == 0xEB || (op & 0xF0) == 0x70) {
 113       // short offset operators (jmp and jcc)
 114       char* disp = (char*) &branch[1];
 115       int imm8 = target - (address) &disp[1];
 116       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
 117       *disp = imm8;
 118     } else {
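      // 0x0F (near jcc) and 0xC7 (xbegin) carry a two-byte opcode, so their 32-bit
      // displacement starts at branch[2]; the remaining forms place it at branch[1].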
 119       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
 120       int imm32 = target - (address) &disp[1];
 121       *disp = imm32;
 122     }
 123   }
 124 
 125   // The following 4 methods return the offset of the appropriate move instruction
 126 
 127   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 128   int load_unsigned_byte(Register dst, Address src);
 129   int load_unsigned_short(Register dst, Address src);
 130 
 131   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 132   int load_signed_byte(Register dst, Address src);
 133   int load_signed_short(Register dst, Address src);
 134 
 135   // Support for sign-extension (hi:lo = extend_sign(lo))
 136   void extend_sign(Register hi, Register lo);
 137 
 138   // Load and store values by size and signed-ness
 139   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 140   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 141 
 142   // Support for inc/dec with optimal instruction selection depending on value
 143 
 144   void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
 145   void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
 146 
 147   void decrementl(Address dst, int value = 1);
 148   void decrementl(Register reg, int value = 1);
 149 
 150   void decrementq(Register reg, int value = 1);
 151   void decrementq(Address dst, int value = 1);
 152 
 153   void incrementl(Address dst, int value = 1);
 154   void incrementl(Register reg, int value = 1);
 155 
 156   void incrementq(Register reg, int value = 1);
 157   void incrementq(Address dst, int value = 1);
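
  // Behavior sketch (the exact policy lives in macroAssembler_x86.cpp): a value of 0
  // emits nothing, a value of 1 may use inc/dec when UseIncDec is enabled, and other
  // values fall back to an add/sub with an immediate operand.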
 158 
 159   // special instructions for EVEX
 160   void setvectmask(Register dst, Register src);
 161   void restorevectmask();
 162 
 163   // Support optimal SSE move instructions.
 164   void movflt(XMMRegister dst, XMMRegister src) {
 165     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 166     else                       { movss (dst, src); return; }
 167   }
 168   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 169   void movflt(XMMRegister dst, AddressLiteral src);
 170   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 171 
 172   void movdbl(XMMRegister dst, XMMRegister src) {
 173     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 174     else                       { movsd (dst, src); return; }
 175   }
 176 
 177   void movdbl(XMMRegister dst, AddressLiteral src);
 178 
 179   void movdbl(XMMRegister dst, Address src) {
 180     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 181     else                         { movlpd(dst, src); return; }
 182   }
 183   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 184 
 185   void incrementl(AddressLiteral dst);
 186   void incrementl(ArrayAddress dst);
 187 
 188   void incrementq(AddressLiteral dst);
 189 
 190   // Alignment
 191   void align(int modulus);
 192   void align(int modulus, int target);
 193 
 194   // A 5 byte nop that is safe for patching (see patch_verified_entry)
 195   void fat_nop();
 196 
 197   // Stack frame creation/removal
 198   void enter();
 199   void leave();
 200 
 201   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 202   // The pointer will be loaded into the thread register.
 203   void get_thread(Register thread);
 204 
 205 
 206   // Support for VM calls
 207   //
 208   // It is imperative that all calls into the VM are handled via the call_VM macros.
 209   // They make sure that the stack linkage is set up correctly. call_VM's correspond
 210   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 211 
 212 
 213   void call_VM(Register oop_result,
 214                address entry_point,
 215                bool check_exceptions = true);
 216   void call_VM(Register oop_result,
 217                address entry_point,
 218                Register arg_1,
 219                bool check_exceptions = true);
 220   void call_VM(Register oop_result,
 221                address entry_point,
 222                Register arg_1, Register arg_2,
 223                bool check_exceptions = true);
 224   void call_VM(Register oop_result,
 225                address entry_point,
 226                Register arg_1, Register arg_2, Register arg_3,
 227                bool check_exceptions = true);
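
  // Illustrative use (a sketch, not taken from this file; the entry point and argument
  // register are placeholders):
  //   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry), rbx);
  // call_VM sets up last_Java_frame, passes the current thread as the implicit first
  // argument, and with check_exceptions = true forwards any pending exception on return.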
 228 
 229   // Overloadings with last_Java_sp
 230   void call_VM(Register oop_result,
 231                Register last_java_sp,
 232                address entry_point,
 233                int number_of_arguments = 0,
 234                bool check_exceptions = true);
 235   void call_VM(Register oop_result,
 236                Register last_java_sp,
 237                address entry_point,
 238                Register arg_1,
 239                bool check_exceptions = true);
 240   void call_VM(Register oop_result,
 241                Register last_java_sp,
 242                address entry_point,
 243                Register arg_1, Register arg_2,
 244                bool check_exceptions = true);
 245   void call_VM(Register oop_result,
 246                Register last_java_sp,
 247                address entry_point,
 248                Register arg_1, Register arg_2, Register arg_3,
 249                bool check_exceptions = true);
 250 
 251   void get_vm_result  (Register oop_result, Register thread);
 252   void get_vm_result_2(Register metadata_result, Register thread);
 253 
 254   // These always tightly bind to MacroAssembler::call_VM_base
 255   // bypassing the virtual implementation
 256   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 257   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 258   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 259   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 260   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 261 
 262   void call_VM_leaf0(address entry_point);
 263   void call_VM_leaf(address entry_point,
 264                     int number_of_arguments = 0);
 265   void call_VM_leaf(address entry_point,
 266                     Register arg_1);
 267   void call_VM_leaf(address entry_point,
 268                     Register arg_1, Register arg_2);
 269   void call_VM_leaf(address entry_point,
 270                     Register arg_1, Register arg_2, Register arg_3);
 271 
 272   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 273   // bypassing the virtual implementation
 274   void super_call_VM_leaf(address entry_point);
 275   void super_call_VM_leaf(address entry_point, Register arg_1);
 276   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 277   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 278   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 279 
 280   // last Java Frame (fills frame anchor)
 281   void set_last_Java_frame(Register thread,
 282                            Register last_java_sp,
 283                            Register last_java_fp,
 284                            address last_java_pc);
 285 
 286   // thread in the default location (r15_thread on 64bit)
 287   void set_last_Java_frame(Register last_java_sp,
 288                            Register last_java_fp,
 289                            address last_java_pc);
 290 
 291   void reset_last_Java_frame(Register thread, bool clear_fp);
 292 
 293   // thread in the default location (r15_thread on 64bit)
 294   void reset_last_Java_frame(bool clear_fp);
 295 
 296   // Stores
 297   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
 298   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
 299 
 300   void resolve_jobject(Register value, Register thread, Register tmp);
 301   void clear_jweak_tag(Register possibly_jweak);
 302 
 303 #if INCLUDE_ALL_GCS
 304 
 305   void g1_write_barrier_pre(Register obj,
 306                             Register pre_val,
 307                             Register thread,
 308                             Register tmp,
 309                             bool tosca_live,
 310                             bool expand_call);
 311 
 312   void g1_write_barrier_post(Register store_addr,
 313                              Register new_val,
 314                              Register thread,
 315                              Register tmp,
 316                              Register tmp2);
 317 
 318 #endif // INCLUDE_ALL_GCS
 319 
 320   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 321   void c2bool(Register x);
 322 
 323   // C++ bool manipulation
 324 
 325   void movbool(Register dst, Address src);
 326   void movbool(Address dst, bool boolconst);
 327   void movbool(Address dst, Register src);
 328   void testbool(Register dst);
 329 
 330   void resolve_oop_handle(Register result);
 331   void load_mirror(Register mirror, Register method);
 332 
 333   // oop manipulations
 334   void load_klass(Register dst, Register src);
 335   void store_klass(Register dst, Register src);
 336 
 337   enum LoadBarrierOn {
 338     LoadBarrierOnStrongOopRef,
 339     LoadBarrierOnWeakOopRef,
 340     LoadBarrierOnPhantomOopRef
 341   };
 342 
 343   void load_barrier(Register ref, Address ref_addr, bool expand_call, LoadBarrierOn on);
 344 
 345   void load_heap_oop(Register dst, Address src, bool expand_call = false, LoadBarrierOn on = LoadBarrierOnStrongOopRef);
 346   void load_heap_oop_not_null(Register dst, Address src);
 347   void store_heap_oop(Address dst, Register src);
 348   void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);
 349 
 350   // Used for storing NULL. All other oop constants should be
 351   // stored using routines that take a jobject.
 352   void store_heap_oop_null(Address dst);
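
  // Illustrative use (sketch; the address and registers are placeholders):
  //   load_heap_oop(rax, Address(rbx, referent_offset), false, LoadBarrierOnWeakOopRef);
  // loads a possibly-weak oop field through the matching load barrier; ordinary strong
  // loads can rely on the LoadBarrierOnStrongOopRef default.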
 353 
 354   void load_prototype_header(Register dst, Register src);
 355 
 356 #ifdef _LP64
 357   void store_klass_gap(Register dst, Register src);
 358 
 359   // This dummy is to prevent a call to store_heap_oop from
 360   // converting a zero (like NULL) into a Register by giving
 361   // the compiler two choices it can't resolve
 362 
 363   void store_heap_oop(Address dst, void* dummy);
 364 
 365   void encode_heap_oop(Register r);
 366   void decode_heap_oop(Register r);
 367   void encode_heap_oop_not_null(Register r);
 368   void decode_heap_oop_not_null(Register r);
 369   void encode_heap_oop_not_null(Register dst, Register src);
 370   void decode_heap_oop_not_null(Register dst, Register src);
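
  // Sketch of the intended pairing (an assumption, not code from this file): with
  // compressed oops a store narrows the value and a load widens it back, roughly
  //   encode_heap_oop(tmp);  movl(field_addr, tmp);    // store
  //   movl(tmp, field_addr); decode_heap_oop(tmp);     // load
  // The *_not_null variants skip the null handling when the oop is known non-null.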
 371 
 372   void set_narrow_oop(Register dst, jobject obj);
 373   void set_narrow_oop(Address dst, jobject obj);
 374   void cmp_narrow_oop(Register dst, jobject obj);
 375   void cmp_narrow_oop(Address dst, jobject obj);
 376 
 377   void encode_klass_not_null(Register r);
 378   void decode_klass_not_null(Register r);
 379   void encode_klass_not_null(Register dst, Register src);
 380   void decode_klass_not_null(Register dst, Register src);
 381   void set_narrow_klass(Register dst, Klass* k);
 382   void set_narrow_klass(Address dst, Klass* k);
 383   void cmp_narrow_klass(Register dst, Klass* k);
 384   void cmp_narrow_klass(Address dst, Klass* k);
 385 
 386   // Returns the byte size of the instructions generated by decode_klass_not_null()
 387   // when compressed klass pointers are being used.
 388   static int instr_size_for_decode_klass_not_null();
 389 
 390   // if heap base register is used - reinit it with the correct value
 391   void reinit_heapbase();
 392 
 393   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 394 
 395 #endif // _LP64
 396 
 397   // Int division/remainder for Java
 398   // (as idivl, but checks for special case as described in JVM spec.)
 399   // returns idivl instruction offset for implicit exception handling
 400   int corrected_idivl(Register reg);
 401 
 402   // Long division/remainder for Java
 403   // (as idivq, but checks for special case as described in JVM spec.)
 404   // returns idivq instruction offset for implicit exception handling
 405   int corrected_idivq(Register reg);
 406 
 407   void int3();
 408 
 409   // Long operation macros for a 32bit cpu
 410   // Long negation for Java
 411   void lneg(Register hi, Register lo);
 412 
 413   // Long multiplication for Java
 414   // (destroys contents of eax, ebx, ecx and edx)
 415   void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
 416 
 417   // Long shifts for Java
 418   // (semantics as described in JVM spec.)
 419   void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
 420   void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
 421 
 422   // Long compare for Java
 423   // (semantics as described in JVM spec.)
 424   void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
 425 
 426 
 427   // misc
 428 
 429   // Sign extension
 430   void sign_extend_short(Register reg);
 431   void sign_extend_byte(Register reg);
 432 
 433   // Division by power of 2, rounding towards 0
 434   void division_with_shift(Register reg, int shift_value);
 435 
 436   // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
 437   //
 438   // CF (corresponds to C0) if x < y
 439   // PF (corresponds to C2) if unordered
 440   // ZF (corresponds to C3) if x = y
 441   //
 442   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 443   // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
 444   void fcmp(Register tmp);
 445   // Variant of the above which allows y to be further down the stack
 446   // and which only pops x and y if specified. If pop_right is
 447   // specified then pop_left must also be specified.
 448   void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
 449 
 450   // Floating-point comparison for Java
 451   // Compares the top-most stack entries on the FPU stack and stores the result in dst.
 452   // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
 453   // (semantics as described in JVM spec.)
 454   void fcmp2int(Register dst, bool unordered_is_less);
 455   // Variant of the above which allows y to be further down the stack
 456   // and which only pops x and y if specified. If pop_right is
 457   // specified then pop_left must also be specified.
 458   void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
 459 
 460   // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
 461   // tmp is a temporary register, if none is available use noreg
 462   void fremr(Register tmp);
 463 
 464   // dst = c = a * b + c
 465   void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 466   void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
 467 
 468   void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 469   void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
 470   void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 471   void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
 472 
 473 
 474   // same as fcmp2int, but using SSE2
 475   void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 476   void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
 477 
 478   // branch to L if FPU flag C2 is set/not set
 479   // tmp is a temporary register, if none is available use noreg
 480   void jC2 (Register tmp, Label& L);
 481   void jnC2(Register tmp, Label& L);
 482 
 483   // Pop ST (ffree & fincstp combined)
 484   void fpop();
 485 
 486   // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
 487   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 488   void load_float(Address src);
 489 
 490   // Store float value to 'address'. If UseSSE >= 1, the value is stored
 491   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 492   void store_float(Address dst);
 493 
 494   // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
 495   // register xmm0. Otherwise, the value is loaded onto the FPU stack.
 496   void load_double(Address src);
 497 
 498   // Store double value to 'address'. If UseSSE >= 2, the value is stored
 499   // from register xmm0. Otherwise, the value is stored from the FPU stack.
 500   void store_double(Address dst);
 501 
 502   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
 503   void push_fTOS();
 504 
 505   // pops double TOS element from CPU stack and pushes on FPU stack
 506   void pop_fTOS();
 507 
 508   void empty_FPU_stack();
 509 
 510   void push_IU_state();
 511   void pop_IU_state();
 512 
 513   void push_FPU_state();
 514   void pop_FPU_state();
 515 
 516   void push_CPU_state();
 517   void pop_CPU_state();
 518 
 519   // Round reg up to a multiple of modulus (modulus must be a power of two)
 520   void round_to(Register reg, int modulus);
 521 
 522   // Callee saved registers handling
 523   void push_callee_saved_registers();
 524   void pop_callee_saved_registers();
 525 
 526   // allocation
 527   void eden_allocate(
 528     Register obj,                      // result: pointer to object after successful allocation
 529     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 530     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 531     Register t1,                       // temp register
 532     Label&   slow_case                 // continuation point if fast allocation fails
 533   );
 534   void tlab_allocate(
 535     Register obj,                      // result: pointer to object after successful allocation
 536     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 537     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 538     Register t1,                       // temp register
 539     Register t2,                       // temp register
 540     Label&   slow_case                 // continuation point if fast allocation fails
 541   );
 542   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 543 
 544   void incr_allocated_bytes(Register thread,
 545                             Register var_size_in_bytes, int con_size_in_bytes,
 546                             Register t1 = noreg);
 547 
 548   // interface method calling
 549   void lookup_interface_method(Register recv_klass,
 550                                Register intf_klass,
 551                                RegisterOrConstant itable_index,
 552                                Register method_result,
 553                                Register scan_temp,
 554                                Label& no_such_interface,
 555                                bool return_method = true);
 556 
 557   // virtual method calling
 558   void lookup_virtual_method(Register recv_klass,
 559                              RegisterOrConstant vtable_index,
 560                              Register method_result);
 561 
 562   // Test sub_klass against super_klass, with fast and slow paths.
 563 
 564   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 565   // One of the three labels can be NULL, meaning take the fall-through.
 566   // If super_check_offset is -1, the value is loaded up from super_klass.
 567   // No registers are killed, except temp_reg.
 568   void check_klass_subtype_fast_path(Register sub_klass,
 569                                      Register super_klass,
 570                                      Register temp_reg,
 571                                      Label* L_success,
 572                                      Label* L_failure,
 573                                      Label* L_slow_path,
 574                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 575 
 576   // The rest of the type check; must be wired to a corresponding fast path.
 577   // It does not repeat the fast path logic, so don't use it standalone.
 578   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 579   // Updates the sub's secondary super cache as necessary.
 580   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 581   void check_klass_subtype_slow_path(Register sub_klass,
 582                                      Register super_klass,
 583                                      Register temp_reg,
 584                                      Register temp2_reg,
 585                                      Label* L_success,
 586                                      Label* L_failure,
 587                                      bool set_cond_codes = false);
 588 
 589   // Simplified, combined version, good for typical uses.
 590   // Falls through on failure.
 591   void check_klass_subtype(Register sub_klass,
 592                            Register super_klass,
 593                            Register temp_reg,
 594                            Label& L_success);
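
  // Illustrative pairing (a sketch of how the split form is typically wired; labels and
  // registers are placeholders):
  //   Label L_ok, L_fail;
  //   check_klass_subtype_fast_path(sub, super, tmp, &L_ok, &L_fail, NULL);
  //   check_klass_subtype_slow_path(sub, super, tmp, noreg, &L_ok, NULL);
  //   bind(L_fail);   // falling through the slow path means failure
  // check_klass_subtype() above packages this pattern for the common case.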
 595 
 596   // method handles (JSR 292)
 597   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 598 
 599   //----
 600   void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
 601 
 602   // Debugging
 603 
 604   // only if +VerifyOops
 605   // TODO: Make these macros with file and line like sparc version!
 606   void verify_oop(Register reg, const char* s = "broken oop");
 607   void verify_oop_addr(Address addr, const char * s = "broken oop addr");
 608 
 609   // TODO: verify method and klass metadata (compare against vptr?)
 610   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 611   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 612 
 613 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 614 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 615 
 616   // only if +VerifyFPU
 617   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
 618 
 619   // Verify or restore cpu control state after JNI call
 620   void restore_cpu_control_state_after_jni();
 621 
 622   // prints msg, dumps registers and stops execution
 623   void stop(const char* msg);
 624 
 625   // prints msg and continues
 626   void warn(const char* msg);
 627 
 628   // dumps registers and other state
 629   void print_state();
 630 
 631   static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
 632   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 633   static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
 634   static void print_state64(int64_t pc, int64_t regs[]);
 635 
 636   void os_breakpoint();
 637 
 638   void untested()                                { stop("untested"); }
 639 
 640   void unimplemented(const char* what = "");
 641 
 642   void should_not_reach_here()                   { stop("should not reach here"); }
 643 
 644   void print_CPU_state();
 645 
 646   // Stack overflow checking
 647   void bang_stack_with_offset(int offset) {
 648     // stack grows down, caller passes positive offset
 649     assert(offset > 0, "must bang with negative offset");
 650     movl(Address(rsp, (-offset)), rax);
 651   }
 652 
 653   // Writes to successive stack pages until the given offset is reached, to check for
 654   // stack overflow + shadow pages.  Also clobbers tmp.
 655   void bang_stack_size(Register size, Register tmp);
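
  // Sketch of the intended use (an assumption based on typical frame-setup code): each
  // page of a new frame plus the shadow zone is touched up front, e.g.
  //   for (int off = page_size; off <= bang_end; off += page_size)
  //     bang_stack_with_offset(off);
  // so that a stack overflow surfaces before the frame memory is actually used.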
 656 
 657   // Check for reserved stack access in method being exited (for JIT)
 658   void reserved_stack_check();
 659 
 660   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
 661                                                 Register tmp,
 662                                                 int offset);
 663 
 664   // Support for serializing memory accesses between threads
 665   void serialize_memory(Register thread, Register tmp);
 666 
 667   // If thread_reg != noreg, the code assumes the passed register contains
 668   // the thread (required on 64 bit).
 669   void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
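
  // Illustrative use (sketch; the 64-bit register choices are placeholders):
  //   Label L_slow;
  //   safepoint_poll(L_slow, r15_thread, rscratch1);
  //   ... fast path continues ...
  //   bind(L_slow);   // branch here to block at the safepoint via the runtime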
 670 
 671   void verify_tlab();
 672 
 673   // Biased locking support
 674   // lock_reg and obj_reg must be loaded up with the appropriate values.
 675   // swap_reg must be rax, and is killed.
 676   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 677   // be killed; if not supplied, push/pop will be used internally to
 678   // allocate a temporary (inefficient, avoid if possible).
 679   // Optional slow case is for implementations (interpreter and C1) which branch to
 680   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 681   // Returns offset of first potentially-faulting instruction for null
 682   // check info (currently consumed only by C1). If
 683   // swap_reg_contains_mark is true then returns -1 as it is assumed
 684   // the calling code has already passed any potential faults.
 685   int biased_locking_enter(Register lock_reg, Register obj_reg,
 686                            Register swap_reg, Register tmp_reg,
 687                            bool swap_reg_contains_mark,
 688                            Label& done, Label* slow_case = NULL,
 689                            BiasedLockingCounters* counters = NULL);
 690   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 691 #ifdef COMPILER2
 692   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
 693   // See full description in macroAssembler_x86.cpp.
 694   void fast_lock(Register obj, Register box, Register tmp,
 695                  Register scr, Register cx1, Register cx2,
 696                  BiasedLockingCounters* counters,
 697                  RTMLockingCounters* rtm_counters,
 698                  RTMLockingCounters* stack_rtm_counters,
 699                  Metadata* method_data,
 700                  bool use_rtm, bool profile_rtm);
 701   void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 702 #if INCLUDE_RTM_OPT
 703   void rtm_counters_update(Register abort_status, Register rtm_counters);
 704   void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
 705   void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
 706                                    RTMLockingCounters* rtm_counters,
 707                                    Metadata* method_data);
 708   void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
 709                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 710   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
 711   void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
 712   void rtm_stack_locking(Register obj, Register tmp, Register scr,
 713                          Register retry_on_abort_count,
 714                          RTMLockingCounters* stack_rtm_counters,
 715                          Metadata* method_data, bool profile_rtm,
 716                          Label& DONE_LABEL, Label& IsInflated);
 717   void rtm_inflated_locking(Register obj, Register box, Register tmp,
 718                             Register scr, Register retry_on_busy_count,
 719                             Register retry_on_abort_count,
 720                             RTMLockingCounters* rtm_counters,
 721                             Metadata* method_data, bool profile_rtm,
 722                             Label& DONE_LABEL);
 723 #endif
 724 #endif
 725 
 726   Condition negate_condition(Condition cond);
 727 
 728   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
 729   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
 730   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 731   // here in MacroAssembler. The major exception to this rule is call.
 732 
 733   // Arithmetics
 734 
 735 
 736   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 737   void addptr(Address dst, Register src);
 738 
 739   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 740   void addptr(Register dst, int32_t src);
 741   void addptr(Register dst, Register src);
 742   void addptr(Register dst, RegisterOrConstant src) {
 743     if (src.is_constant()) addptr(dst, (int) src.as_constant());
 744     else                   addptr(dst,       src.as_register());
 745   }
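
  // Example of why the ptr-sized wrappers exist (a sketch): shared 32/64-bit code can write
  //   addptr(rsp, wordSize);   // addq on LP64, addl on 32-bit
  // instead of repeating LP64_ONLY/NOT_LP64 at every call site.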
 746 
 747   void andptr(Register dst, int32_t src);
 748   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
 749 
 750   void cmp8(AddressLiteral src1, int imm);
 751 
 752   // renamed to drag out the casting of address to int32_t/intptr_t
 753   void cmp32(Register src1, int32_t imm);
 754 
 755   void cmp32(AddressLiteral src1, int32_t imm);
 756   // compare reg - mem, or reg - &mem
 757   void cmp32(Register src1, AddressLiteral src2);
 758 
 759   void cmp32(Register src1, Address src2);
 760 
 761 #ifndef _LP64
 762   void cmpklass(Address dst, Metadata* obj);
 763   void cmpklass(Register dst, Metadata* obj);
 764   void cmpoop(Address dst, jobject obj);
 765 #endif // _LP64
 766 
 767   void cmpoop(Register src1, Register src2);
 768   void cmpoop(Register src1, Address src2);
 769   void cmpoop(Register dst, jobject obj);
 770 
 771   // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
 772   void cmpptr(Address src1, AddressLiteral src2);
 773 
 774   void cmpptr(Register src1, AddressLiteral src2);
 775 
 776   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 777   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 778   // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 779 
 780   void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 781   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 782 
 783   // cmp64 to avoid hiding cmpq
 784   void cmp64(Register src1, AddressLiteral src);
 785 
 786   void cmpxchgptr(Register reg, Address adr);
 787 
 788   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 789 
 790 
 791   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
 792   void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 793 
 794 
 795   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 796 
 797   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
 798 
 799   void shlptr(Register dst, int32_t shift);
 800   void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
 801 
 802   void shrptr(Register dst, int32_t shift);
 803   void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
 804 
 805   void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
 806   void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
 807 
 808   void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 809 
 810   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 811   void subptr(Register dst, int32_t src);
 812   // Force generation of a 4 byte immediate value even if it fits into 8 bits
 813   void subptr_imm32(Register dst, int32_t src);
 814   void subptr(Register dst, Register src);
 815   void subptr(Register dst, RegisterOrConstant src) {
 816     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 817     else                   subptr(dst,       src.as_register());
 818   }
 819 
 820   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 821   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 822 
 823   void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 824   void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 825 
 826   void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
 827 
 828 
 829 
 830   // Helper functions for statistics gathering.
 831   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 832   void cond_inc32(Condition cond, AddressLiteral counter_addr);
 833   // Unconditional atomic increment.
 834   void atomic_incl(Address counter_addr);
 835   void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
 836 #ifdef _LP64
 837   void atomic_incq(Address counter_addr);
 838   void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
 839 #endif
 840   void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
 841   void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 842 
 843   void lea(Register dst, AddressLiteral adr);
 844   void lea(Address dst, AddressLiteral adr);
 845   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 846 
 847   void leal32(Register dst, Address src) { leal(dst, src); }
 848 
 849   // Import other testl() methods from the parent class or else
 850   // they will be hidden by the following overriding declaration.
 851   using Assembler::testl;
 852   void testl(Register dst, AddressLiteral src);
 853 
 854   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 855   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 856   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 857   void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 858 
 859   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 860   void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
 861   void testptr(Register src1, Register src2);
 862 
 863   void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 864   void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
 865 
 866   // Calls
 867 
 868   void call(Label& L, relocInfo::relocType rtype);
 869   void call(Register entry);
 870 
 871   // NOTE: this call transfers to the effective address of entry, NOT
 872   // the address contained by entry, because that is more natural
 873   // for jumps/calls.
 874   void call(AddressLiteral entry);
 875 
 876   // Emit the CompiledIC call idiom
 877   void ic_call(address entry, jint method_index = 0);
 878 
 879   // Jumps
 880 
 881   // NOTE: these jumps transfer to the effective address of dst, NOT
 882   // the address contained by dst, because that is more natural
 883   // for jumps/calls.
 884   void jump(AddressLiteral dst);
 885   void jump_cc(Condition cc, AddressLiteral dst);
 886 
 887   // 32bit can do a case table jump in one instruction but we no longer allow the base
 888   // to be installed in the Address class. This jump will transfer to the address
 889   // contained in the location described by entry (not the address of entry).
 890   void jump(ArrayAddress entry);
 891 
 892   // Floating
 893 
 894   void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
 895   void andpd(XMMRegister dst, AddressLiteral src);
 896   void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
 897 
 898   void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
 899   void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
 900   void andps(XMMRegister dst, AddressLiteral src);
 901 
 902   void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
 903   void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
 904   void comiss(XMMRegister dst, AddressLiteral src);
 905 
 906   void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
 907   void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
 908   void comisd(XMMRegister dst, AddressLiteral src);
 909 
 910   void fadd_s(Address src)        { Assembler::fadd_s(src); }
 911   void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
 912 
 913   void fldcw(Address src) { Assembler::fldcw(src); }
 914   void fldcw(AddressLiteral src);
 915 
 916   void fld_s(int index)   { Assembler::fld_s(index); }
 917   void fld_s(Address src) { Assembler::fld_s(src); }
 918   void fld_s(AddressLiteral src);
 919 
 920   void fld_d(Address src) { Assembler::fld_d(src); }
 921   void fld_d(AddressLiteral src);
 922 
 923   void fld_x(Address src) { Assembler::fld_x(src); }
 924   void fld_x(AddressLiteral src);
 925 
 926   void fmul_s(Address src)        { Assembler::fmul_s(src); }
 927   void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
 928 
 929   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
 930   void ldmxcsr(AddressLiteral src);
 931 
 932 #ifdef _LP64
 933  private:
 934   void sha256_AVX2_one_round_compute(
 935     Register  reg_old_h,
 936     Register  reg_a,
 937     Register  reg_b,
 938     Register  reg_c,
 939     Register  reg_d,
 940     Register  reg_e,
 941     Register  reg_f,
 942     Register  reg_g,
 943     Register  reg_h,
 944     int iter);
 945   void sha256_AVX2_four_rounds_compute_first(int start);
 946   void sha256_AVX2_four_rounds_compute_last(int start);
 947   void sha256_AVX2_one_round_and_sched(
 948         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
 949         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
 950         XMMRegister xmm_2,     /* ymm6 */
 951         XMMRegister xmm_3,     /* ymm7 */
 952         Register    reg_a,      /* == eax on 0 iteration, then rotate 8 registers right on each subsequent iteration */
 953         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
 954         Register    reg_c,      /* edi */
 955         Register    reg_d,      /* esi */
 956         Register    reg_e,      /* r8d */
 957         Register    reg_f,      /* r9d */
 958         Register    reg_g,      /* r10d */
 959         Register    reg_h,      /* r11d */
 960         int iter);
 961 
 962   void addm(int disp, Register r1, Register r2);
 963 
 964  public:
 965   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 966                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 967                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 968                    bool multi_block, XMMRegister shuf_mask);
 969 #endif
 970 
 971 #ifdef _LP64
 972  private:
 973   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
 974                                      Register e, Register f, Register g, Register h, int iteration);
 975 
 976   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
 977                                           Register a, Register b, Register c, Register d, Register e, Register f,
 978                                           Register g, Register h, int iteration);
 979 
 980   void addmq(int disp, Register r1, Register r2);
 981  public:
 982   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 983                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 984                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
 985                    XMMRegister shuf_mask);
 986 #endif
 987 
 988   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
 989                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
 990                  Register buf, Register state, Register ofs, Register limit, Register rsp,
 991                  bool multi_block);
 992 
 993 #ifdef _LP64
 994   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 995                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 996                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 997                    bool multi_block, XMMRegister shuf_mask);
 998 #else
 999   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1000                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1001                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1002                    bool multi_block);
1003 #endif
1004 
1005   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1006                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1007                 Register rax, Register rcx, Register rdx, Register tmp);
1008 
1009 #ifdef _LP64
1010   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1011                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1012                 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
1013 
1014   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1015                   XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1016                   Register rax, Register rcx, Register rdx, Register r11);
1017 
1018   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1019                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1020                 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
1021 
1022   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1023                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1024                 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
1025                 Register tmp3, Register tmp4);
1026 
1027   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1028                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1029                 Register rax, Register rcx, Register rdx, Register tmp1,
1030                 Register tmp2, Register tmp3, Register tmp4);
1031   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1032                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1033                 Register rax, Register rcx, Register rdx, Register tmp1,
1034                 Register tmp2, Register tmp3, Register tmp4);
1035 #else
1036   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1037                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1038                 Register rax, Register rcx, Register rdx, Register tmp1);
1039 
1040   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1041                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1042                 Register rax, Register rcx, Register rdx, Register tmp);
1043 
1044   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1045                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1046                 Register rdx, Register tmp);
1047 
1048   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1049                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1050                 Register rax, Register rbx, Register rdx);
1051 
1052   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1053                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1054                 Register rax, Register rcx, Register rdx, Register tmp);
1055 
1056   void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1057                         Register edx, Register ebx, Register esi, Register edi,
1058                         Register ebp, Register esp);
1059 
1060   void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1061                          Register esi, Register edi, Register ebp, Register esp);
1062 
1063   void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1064                         Register edx, Register ebx, Register esi, Register edi,
1065                         Register ebp, Register esp);
1066 
1067   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1068                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1069                 Register rax, Register rcx, Register rdx, Register tmp);
1070 #endif
1071 
1072   void increase_precision();
1073   void restore_precision();
1074 
1075 private:
1076 
1077   // these are private because users should be doing movflt/movdbl
1078 
1079   void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
1080   void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1081   void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
1082   void movss(XMMRegister dst, AddressLiteral src);
1083 
1084   void movlpd(XMMRegister dst, Address src)    {Assembler::movlpd(dst, src); }
1085   void movlpd(XMMRegister dst, AddressLiteral src);
1086 
1087 public:
1088 
1089   void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
1090   void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
1091   void addsd(XMMRegister dst, AddressLiteral src);
1092 
1093   void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
1094   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
1095   void addss(XMMRegister dst, AddressLiteral src);
1096 
1097   void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
1098   void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
1099   void addpd(XMMRegister dst, AddressLiteral src);
1100 
1101   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
1102   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
1103   void divsd(XMMRegister dst, AddressLiteral src);
1104 
1105   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
1106   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
1107   void divss(XMMRegister dst, AddressLiteral src);
1108 
1109   // Move Unaligned Double Quadword
1110   void movdqu(Address     dst, XMMRegister src);
1111   void movdqu(XMMRegister dst, Address src);
1112   void movdqu(XMMRegister dst, XMMRegister src);
1113   void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1114   // AVX Unaligned forms
1115   void vmovdqu(Address     dst, XMMRegister src);
1116   void vmovdqu(XMMRegister dst, Address src);
1117   void vmovdqu(XMMRegister dst, XMMRegister src);
1118   void vmovdqu(XMMRegister dst, AddressLiteral src);
1119 
1120   // Move Aligned Double Quadword
1121   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
1122   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
1123   void movdqa(XMMRegister dst, AddressLiteral src);
1124 
1125   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1126   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
1127   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
1128   void movsd(XMMRegister dst, AddressLiteral src);
1129 
1130   void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
1131   void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
1132   void mulpd(XMMRegister dst, AddressLiteral src);
1133 
1134   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
1135   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
1136   void mulsd(XMMRegister dst, AddressLiteral src);
1137 
1138   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
1139   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
1140   void mulss(XMMRegister dst, AddressLiteral src);
1141 
1142   // Carry-Less Multiplication Quadword
1143   void pclmulldq(XMMRegister dst, XMMRegister src) {
1144     // 0x00 - multiply lower 64 bits [0:63]
1145     Assembler::pclmulqdq(dst, src, 0x00);
1146   }
1147   void pclmulhdq(XMMRegister dst, XMMRegister src) {
1148     // 0x11 - multiply upper 64 bits [64:127]
1149     Assembler::pclmulqdq(dst, src, 0x11);
1150   }
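  // Illustrative note: in the PCLMULQDQ immediate, bit 0 selects which qword of
  // 'dst' is used and bit 4 selects which qword of 'src', so 0x00 pairs the low
  // halves and 0x11 pairs the high halves. A hypothetical CRC folding step could
  // combine both products (assuming the usual '__' MacroAssembler shorthand):
  //   __ pclmulhdq(xtmp, xK);   // high(xtmp) * high(xK)
  //   __ pclmulldq(xcrc, xK);   // low(xcrc)  * low(xK)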
1151 
1152   void pcmpeqb(XMMRegister dst, XMMRegister src);
1153   void pcmpeqw(XMMRegister dst, XMMRegister src);
1154 
1155   void pcmpestri(XMMRegister dst, Address src, int imm8);
1156   void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1157 
1158   void pmovzxbw(XMMRegister dst, XMMRegister src);
1159   void pmovzxbw(XMMRegister dst, Address src);
1160 
1161   void pmovmskb(Register dst, XMMRegister src);
1162 
1163   void ptest(XMMRegister dst, XMMRegister src);
1164 
1165   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
1166   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
1167   void sqrtsd(XMMRegister dst, AddressLiteral src);
1168 
1169   void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
1170   void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
1171   void sqrtss(XMMRegister dst, AddressLiteral src);
1172 
1173   void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
1174   void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
1175   void subsd(XMMRegister dst, AddressLiteral src);
1176 
1177   void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
1178   void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
1179   void subss(XMMRegister dst, AddressLiteral src);
1180 
1181   void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1182   void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
1183   void ucomiss(XMMRegister dst, AddressLiteral src);
1184 
1185   void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1186   void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
1187   void ucomisd(XMMRegister dst, AddressLiteral src);
1188 
1189   // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1190   void xorpd(XMMRegister dst, XMMRegister src);
1191   void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
1192   void xorpd(XMMRegister dst, AddressLiteral src);
1193 
1194   // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1195   void xorps(XMMRegister dst, XMMRegister src);
1196   void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
1197   void xorps(XMMRegister dst, AddressLiteral src);
1198 
1199   // Shuffle Bytes
1200   void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1201   void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
1202   void pshufb(XMMRegister dst, AddressLiteral src);
1203   // AVX 3-operands instructions
1204 
1205   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1206   void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
1207   void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1208 
1209   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1210   void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
1211   void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1212 
1213   void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1214   void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1215 
1216   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1217   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1218 
1219   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1220   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1221 
1222   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1223   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1224   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1225 
1226   void vpbroadcastw(XMMRegister dst, XMMRegister src);
1227 
1228   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1229   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1230 
1231   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1232   void vpmovmskb(Register dst, XMMRegister src);
1233 
1234   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1235   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1236 
1237   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1238   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1239 
1240   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1241   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1242 
1243   void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1244   void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1245 
1246   void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1247   void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1248 
1249   void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1250   void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1251 
1252   void vptest(XMMRegister dst, XMMRegister src);
1253 
1254   void punpcklbw(XMMRegister dst, XMMRegister src);
1255   void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1256 
1257   void pshufd(XMMRegister dst, Address src, int mode);
1258   void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1259 
1260   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1261   void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1262 
1263   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1264   void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
1265   void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1266 
1267   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1268   void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
1269   void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1270 
1271   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1272   void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
1273   void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1274 
1275   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1276   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
1277   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1278 
1279   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1280   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
1281   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1282 
1283   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1284   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
1285   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1286 
1287   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1288   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
1289   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1290 
1291   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1292   void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
1293   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1294 
1295   void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1296   void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1297 
1298   // AVX Vector instructions
1299 
1300   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1301   void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1302   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1303 
1304   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1305   void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1306   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
1307 
1308   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1309     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1310       Assembler::vpxor(dst, nds, src, vector_len);
1311     else
1312       Assembler::vxorpd(dst, nds, src, vector_len);
1313   }
1314   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1315     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1316       Assembler::vpxor(dst, nds, src, vector_len);
1317     else
1318       Assembler::vxorpd(dst, nds, src, vector_len);
1319   }
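  // Usage sketch (illustrative; assumes the usual '__' MacroAssembler shorthand
  // and an arbitrary register choice): zeroing a 256-bit register works on both
  // AVX and AVX2 because the wrapper falls back to vxorpd when the 256-bit form
  // of vpxor is unavailable:
  //   __ vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);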
1320 
1321   // Simple version for AVX2 256bit vectors
1322   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1323   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
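  // Note: the boolean 'true' above is passed as vector_len; it promotes to 1,
  // which is assumed here to correspond to Assembler::AVX_256bit.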
1324 
1325   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1326     if (UseAVX > 2) {
1327       Assembler::vinserti32x4(dst, dst, src, imm8);
1328     } else if (UseAVX > 1) {
1329       // vinserti128 is available only in AVX2
1330       Assembler::vinserti128(dst, nds, src, imm8);
1331     } else {
1332       Assembler::vinsertf128(dst, nds, src, imm8);
1333     }
1334   }
1335 
1336   void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1337     if (UseAVX > 2) {
1338       Assembler::vinserti32x4(dst, dst, src, imm8);
1339     } else if (UseAVX > 1) {
1340       // vinserti128 is available only in AVX2
1341       Assembler::vinserti128(dst, nds, src, imm8);
1342     } else {
1343       Assembler::vinsertf128(dst, nds, src, imm8);
1344     }
1345   }
1346 
1347   void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1348     if (UseAVX > 2) {
1349       Assembler::vextracti32x4(dst, src, imm8);
1350     } else if (UseAVX > 1) {
1351       // vextracti128 is available only in AVX2
1352       Assembler::vextracti128(dst, src, imm8);
1353     } else {
1354       Assembler::vextractf128(dst, src, imm8);
1355     }
1356   }
1357 
1358   void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1359     if (UseAVX > 2) {
1360       Assembler::vextracti32x4(dst, src, imm8);
1361     } else if (UseAVX > 1) {
1362       // vextracti128 is available only in AVX2
1363       Assembler::vextracti128(dst, src, imm8);
1364     } else {
1365       Assembler::vextractf128(dst, src, imm8);
1366     }
1367   }
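  // The four wrappers above appear to pick the widest encoding the CPU supports:
  // the EVEX vinserti32x4/vextracti32x4 forms when UseAVX > 2, the AVX2 integer
  // forms when available, and the AVX1 floating-point forms as a fallback.
  // Usage sketch (illustrative registers, '__' shorthand assumed):
  //   __ vextracti128(xmm1, xmm0, 1);        // xmm1 = upper 128 bits of ymm0
  //   __ vinserti128(xmm0, xmm0, xmm1, 1);   // put them back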
1368 
1369   // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1370   void vinserti128_high(XMMRegister dst, XMMRegister src) {
1371     vinserti128(dst, dst, src, 1);
1372   }
1373   void vinserti128_high(XMMRegister dst, Address src) {
1374     vinserti128(dst, dst, src, 1);
1375   }
1376   void vextracti128_high(XMMRegister dst, XMMRegister src) {
1377     vextracti128(dst, src, 1);
1378   }
1379   void vextracti128_high(Address dst, XMMRegister src) {
1380     vextracti128(dst, src, 1);
1381   }
1382 
1383   void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1384     if (UseAVX > 2) {
1385       Assembler::vinsertf32x4(dst, dst, src, 1);
1386     } else {
1387       Assembler::vinsertf128(dst, dst, src, 1);
1388     }
1389   }
1390 
1391   void vinsertf128_high(XMMRegister dst, Address src) {
1392     if (UseAVX > 2) {
1393       Assembler::vinsertf32x4(dst, dst, src, 1);
1394     } else {
1395       Assembler::vinsertf128(dst, dst, src, 1);
1396     }
1397   }
1398 
1399   void vextractf128_high(XMMRegister dst, XMMRegister src) {
1400     if (UseAVX > 2) {
1401       Assembler::vextractf32x4(dst, src, 1);
1402     } else {
1403       Assembler::vextractf128(dst, src, 1);
1404     }
1405   }
1406 
1407   void vextractf128_high(Address dst, XMMRegister src) {
1408     if (UseAVX > 2) {
1409       Assembler::vextractf32x4(dst, src, 1);
1410     } else {
1411       Assembler::vextractf128(dst, src, 1);
1412     }
1413   }
1414 
1415   // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1416   void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1417     Assembler::vinserti64x4(dst, dst, src, 1);
1418   }
1419   void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1420     Assembler::vinsertf64x4(dst, dst, src, 1);
1421   }
1422   void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1423     Assembler::vextracti64x4(dst, src, 1);
1424   }
1425   void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1426     Assembler::vextractf64x4(dst, src, 1);
1427   }
1428   void vextractf64x4_high(Address dst, XMMRegister src) {
1429     Assembler::vextractf64x4(dst, src, 1);
1430   }
1431   void vinsertf64x4_high(XMMRegister dst, Address src) {
1432     Assembler::vinsertf64x4(dst, dst, src, 1);
1433   }
1434 
1435   // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1436   void vinserti128_low(XMMRegister dst, XMMRegister src) {
1437     vinserti128(dst, dst, src, 0);
1438   }
1439   void vinserti128_low(XMMRegister dst, Address src) {
1440     vinserti128(dst, dst, src, 0);
1441   }
1442   void vextracti128_low(XMMRegister dst, XMMRegister src) {
1443     vextracti128(dst, src, 0);
1444   }
1445   void vextracti128_low(Address dst, XMMRegister src) {
1446     vextracti128(dst, src, 0);
1447   }
1448 
1449   void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1450     if (UseAVX > 2) {
1451       Assembler::vinsertf32x4(dst, dst, src, 0);
1452     } else {
1453       Assembler::vinsertf128(dst, dst, src, 0);
1454     }
1455   }
1456 
1457   void vinsertf128_low(XMMRegister dst, Address src) {
1458     if (UseAVX > 2) {
1459       Assembler::vinsertf32x4(dst, dst, src, 0);
1460     } else {
1461       Assembler::vinsertf128(dst, dst, src, 0);
1462     }
1463   }
1464 
1465   void vextractf128_low(XMMRegister dst, XMMRegister src) {
1466     if (UseAVX > 2) {
1467       Assembler::vextractf32x4(dst, src, 0);
1468     } else {
1469       Assembler::vextractf128(dst, src, 0);
1470     }
1471   }
1472 
1473   void vextractf128_low(Address dst, XMMRegister src) {
1474     if (UseAVX > 2) {
1475       Assembler::vextractf32x4(dst, src, 0);
1476     } else {
1477       Assembler::vextractf128(dst, src, 0);
1478     }
1479   }
1480 
1481   // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1482   void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1483     Assembler::vinserti64x4(dst, dst, src, 0);
1484   }
1485   void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1486     Assembler::vinsertf64x4(dst, dst, src, 0);
1487   }
1488   void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1489     Assembler::vextracti64x4(dst, src, 0);
1490   }
1491   void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1492     Assembler::vextractf64x4(dst, src, 0);
1493   }
1494   void vextractf64x4_low(Address dst, XMMRegister src) {
1495     Assembler::vextractf64x4(dst, src, 0);
1496   }
1497   void vinsertf64x4_low(XMMRegister dst, Address src) {
1498     Assembler::vinsertf64x4(dst, dst, src, 0);
1499   }
1500 
1501   // Carry-Less Multiplication Quadword
1502   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1503     // 0x00 - multiply lower 64 bits [0:63]
1504     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1505   }
1506   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1507     // 0x11 - multiply upper 64 bits [64:127]
1508     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1509   }
1510 
1511   // Data
1512 
1513   void cmov32( Condition cc, Register dst, Address  src);
1514   void cmov32( Condition cc, Register dst, Register src);
1515 
1516   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1517 
1518   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1519   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1520 
1521   void movoop(Register dst, jobject obj);
1522   void movoop(Address dst, jobject obj);
1523 
1524   void mov_metadata(Register dst, Metadata* obj);
1525   void mov_metadata(Address dst, Metadata* obj);
1526 
1527   void movptr(ArrayAddress dst, Register src);
1528   // can this do an lea?
1529   void movptr(Register dst, ArrayAddress src);
1530 
1531   void movptr(Register dst, Address src);
1532 
1533 #ifdef _LP64
1534   void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1535 #else
1536   void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1537 #endif
1538 
1539   void movptr(Register dst, intptr_t src);
1540   void movptr(Register dst, Register src);
1541   void movptr(Address dst, intptr_t src);
1542 
1543   void movptr(Address dst, Register src);
1544 
1545   void movptr(Register dst, RegisterOrConstant src) {
1546     if (src.is_constant()) movptr(dst, src.as_constant());
1547     else                   movptr(dst, src.as_register());
1548   }
1549 
1550 #ifdef _LP64
1551   // Generally the next two are only used for moving NULL, although there are
1552   // situations in initializing the mark word where they could be used. They
1553   // are dangerous.
1554 
1555   // They only exist on LP64 because on 32-bit platforms int32_t and intptr_t
1556   // are the same type, which would make these declarations ambiguous.
1557 
1558   void movptr(Address dst, int32_t imm32);
1559   void movptr(Register dst, int32_t imm32);
1560 #endif // _LP64
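  // Illustrative (hypothetical) use, storing NULL into a pointer-sized slot:
  //   __ movptr(Address(rsp, 0), (int32_t)NULL_WORD);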
1561 
1562   // to avoid hiding movl
1563   void mov32(AddressLiteral dst, Register src);
1564   void mov32(Register dst, AddressLiteral src);
1565 
1566   // to avoid hiding movb
1567   void movbyte(ArrayAddress dst, int src);
1568 
1569   // Import other mov() methods from the parent class or else
1570   // they will be hidden by the following overriding declaration.
1571   using Assembler::movdl;
1572   using Assembler::movq;
1573   void movdl(XMMRegister dst, AddressLiteral src);
1574   void movq(XMMRegister dst, AddressLiteral src);
1575 
1576   // Can push value or effective address
1577   void pushptr(AddressLiteral src);
1578 
1579   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1580   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1581 
1582   void pushoop(jobject obj);
1583   void pushklass(Metadata* obj);
1584 
1585   // sign-extend a 32-bit ('l') value to a pointer-sized element as needed
1586   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1587   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1588 
1589   // C2 compiled method's prolog code.
1590   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
1591 
1592   // Clear memory of size 'cnt' qwords, starting at 'base';
1593   // if 'is_large' is set, do not try to produce a short loop.
1594   void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);
1595 
1596 #ifdef COMPILER2
1597   void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
1598                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
1599 
1600   // IndexOf strings.
1601   // Small strings are loaded through the stack if they cross a page boundary.
1602   void string_indexof(Register str1, Register str2,
1603                       Register cnt1, Register cnt2,
1604                       int int_cnt2,  Register result,
1605                       XMMRegister vec, Register tmp,
1606                       int ae);
1607 
1608   // IndexOf for constant substrings with size >= 8 elements
1609   // which don't need to be loaded through the stack.
1610   void string_indexofC8(Register str1, Register str2,
1611                       Register cnt1, Register cnt2,
1612                       int int_cnt2,  Register result,
1613                       XMMRegister vec, Register tmp,
1614                       int ae);
1615 
1616   // Smallest code: we don't need to load through the stack;
1617   // just check the string tail.
1618 
1619   // helper function for string_compare
1620   void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
1621                           Address::ScaleFactor scale, Address::ScaleFactor scale1,
1622                           Address::ScaleFactor scale2, Register index, int ae);
1623   // Compare strings.
1624   void string_compare(Register str1, Register str2,
1625                       Register cnt1, Register cnt2, Register result,
1626                       XMMRegister vec1, int ae);
1627 
1628   // Search for a non-ASCII character (negative byte value) in a byte array;
1629   // return true if any is found, false otherwise.
1630   void has_negatives(Register ary1, Register len,
1631                      Register result, Register tmp1,
1632                      XMMRegister vec1, XMMRegister vec2);
1633 
1634   // Compare char[] or byte[] arrays.
1635   void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
1636                      Register limit, Register result, Register chr,
1637                      XMMRegister vec1, XMMRegister vec2, bool is_char);
1638 
1639 #endif
1640 
1641   // Fill primitive arrays
1642   void generate_fill(BasicType t, bool aligned,
1643                      Register to, Register value, Register count,
1644                      Register rtmp, XMMRegister xtmp);
1645 
1646   void encode_iso_array(Register src, Register dst, Register len,
1647                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1648                         XMMRegister tmp4, Register tmp5, Register result);
1649 
1650 #ifdef _LP64
1651   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1652   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1653                              Register y, Register y_idx, Register z,
1654                              Register carry, Register product,
1655                              Register idx, Register kdx);
1656   void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1657                               Register yz_idx, Register idx,
1658                               Register carry, Register product, int offset);
1659   void multiply_128_x_128_bmi2_loop(Register y, Register z,
1660                                     Register carry, Register carry2,
1661                                     Register idx, Register jdx,
1662                                     Register yz_idx1, Register yz_idx2,
1663                                     Register tmp, Register tmp3, Register tmp4);
1664   void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1665                                Register yz_idx, Register idx, Register jdx,
1666                                Register carry, Register product,
1667                                Register carry2);
1668   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1669                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1670   void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1671                      Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1672   void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1673                             Register tmp2);
1674   void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1675                        Register rdxReg, Register raxReg);
1676   void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1677   void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1678                        Register tmp3, Register tmp4);
1679   void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1680                      Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1681 
1682   void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1683                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1684                Register raxReg);
1685   void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1686                Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1687                Register raxReg);
1688   void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1689                            Register result, Register tmp1, Register tmp2,
1690                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1691 #endif
1692 
1693   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1694   void update_byte_crc32(Register crc, Register val, Register table);
1695   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1696   // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1697   // Note on a naming convention:
1698   // Prefix w = register only used on a Westmere+ architecture
1699   // Prefix n = register only used on a Nehalem architecture
1700 #ifdef _LP64
1701   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1702                        Register tmp1, Register tmp2, Register tmp3);
1703 #else
1704   void crc32c_ipl_alg4(Register in_out, uint32_t n,
1705                        Register tmp1, Register tmp2, Register tmp3,
1706                        XMMRegister xtmp1, XMMRegister xtmp2);
1707 #endif
1708   void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1709                         Register in_out,
1710                         uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1711                         XMMRegister w_xtmp2,
1712                         Register tmp1,
1713                         Register n_tmp2, Register n_tmp3);
1714   void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1715                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1716                        Register tmp1, Register tmp2,
1717                        Register n_tmp3);
1718   void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1719                          Register in_out1, Register in_out2, Register in_out3,
1720                          Register tmp1, Register tmp2, Register tmp3,
1721                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1722                          Register tmp4, Register tmp5,
1723                          Register n_tmp6);
1724   void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1725                             Register tmp1, Register tmp2, Register tmp3,
1726                             Register tmp4, Register tmp5, Register tmp6,
1727                             XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1728                             bool is_pclmulqdq_supported);
1729   // Fold 128-bit data chunk
1730   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1731   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1732   // Fold 8-bit data
1733   void fold_8bit_crc32(Register crc, Register table, Register tmp);
1734   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1735 
1736   // Compress char[] array to byte[].
1737   void char_array_compress(Register src, Register dst, Register len,
1738                            XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1739                            XMMRegister tmp4, Register tmp5, Register result);
1740 
1741   // Inflate byte[] array to char[].
1742   void byte_array_inflate(Register src, Register dst, Register len,
1743                           XMMRegister tmp1, Register tmp2);
1744 
1745 };
1746 
1747 /**
1748  * class SkipIfEqual:
1749  *
1750  * Instantiating this class will result in assembly code being output that will
1751  * jump around any code emitted between the creation of the instance and its
1752  * automatic destruction at the end of a scope block, depending on the value of
1753  * the flag passed to the constructor, which will be checked at run-time.
1754  */
1755 class SkipIfEqual {
1756  private:
1757   MacroAssembler* _masm;
1758   Label _label;
1759 
1760  public:
1761    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1762    ~SkipIfEqual();
1763 };
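// Usage sketch (illustrative only; 'SomeBoolFlag' is a placeholder and '__' is
// the usual MacroAssembler shorthand): code emitted inside the scope is jumped
// over at run-time when the flag equals the value given to the constructor.
//
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     __ call_VM_leaf(entry, 0);   // skipped when SomeBoolFlag == false
//   }   // ~SkipIfEqual() binds the branch target here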
1764 
1765 #endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP