/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
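
  // Hedged usage sketch (hypothetical call sites): a load from M[rax + 8] with a
  // small, known offset needs no explicit check, since dereferencing a NULL base
  // lands in the protected first page and the OS fault covers it:
  //   null_check(rax, 8);
  // With an unknown (or out-of-range) offset, the default of -1 forces explicit
  // check code to be generated:
  //   null_check(rax);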

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
        (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
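
  // Worked example of the patching above (hedged sketch): for a near jmp
  // (op == 0xE9) the 32-bit displacement lives at branch + 1 and is relative
  // to the end of the instruction, so the patched value is
  //   imm32 = target - (branch + 1 + 4);
  // which is exactly what the '&disp[1]' arithmetic computes.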

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
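
  // Hedged usage sketch (hypothetical call site): raising an exception from the
  // interpreter via the VM, with no oop result and default exception checking:
  //   call_VM(noreg,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  // The JavaThread* argument is implicit; call_VM_base passes it to the VM entry.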

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

  void resolve_jobject(Register value, Register thread, Register tmp);
  void clear_jweak_tag(Register possibly_jweak);

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

  void shenandoah_write_barrier_post(Register store_addr,
                                     Register new_val,
                                     Register thread,
                                     Register tmp,
                                     Register tmp2);

  void shenandoah_write_barrier(Register dst);

#endif // INCLUDE_ALL_GCS

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void load_mirror(Register mirror, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
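
  // The special case above (hedged note): Java requires Integer.MIN_VALUE / -1
  // to yield Integer.MIN_VALUE (with remainder 0), while a raw idivl would raise
  // #DE on that overflow; corrected_idivl/idivq branch around the divide for it.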

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round reg up to a multiple of modulus (modulus must be a power of two)
  void round_to(Register reg, int modulus);
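
  // Hedged example (hypothetical call site): round_to(rbx, BytesPerWord) rounds
  // a byte count in rbx up to the next word multiple.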

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
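
  // Hedged usage sketch (hypothetical registers and labels):
  //   Label L_ok;
  //   check_klass_subtype(rbx /* sub */, rax /* super */, rcx /* temp */, L_ok);
  //   // falling through here means the subtype check failed
  //   jmp(L_failure_handler);
  //   bind(L_ok);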

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char * s = "broken oop addr");

  void shenandoah_in_heap_check(Register dst, Register tmp, Label& done);
  void shenandoah_cset_check(Register dst, Register tmp, Label& done);

  void shenandoah_store_addr_check(Register dst);
  void shenandoah_store_addr_check(Address dst);

  void shenandoah_store_val_check(Register dst, Register value);
  void shenandoah_store_val_check(Address dst, Register value);

  void shenandoah_lock_check(Register dst);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
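  //
  // Hedged example (hypothetical call site): comparing a register against a
  // possibly-far memory cell through an AddressLiteral wrapper such as
  // ExternalAddress:
  //   cmp32(rax, ExternalAddress((address) &SomeGlobalFlag));
  // where SomeGlobalFlag is a placeholder; on 64bit the address may first be
  // materialized in a scratch register.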

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst,       src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  // Special cmp for heap objects, possibly inserting required barriers.
  void cmpoopptr(Register src1, Register src2);
  void cmpoopptr(Register src1, Address src2);

  void cmpxchgptr(Register reg, Address adr);

  // Special Shenandoah CAS implementation that handles false negatives
  // due to concurrent evacuation.
  void cmpxchg_oop_shenandoah(Register res, Address addr, Register oldval, Register newval,
                              bool exchange,
                              Register tmp1, Register tmp2);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst,       src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump transfers to the address
  // contained in the location described by entry (not the address of entry)
 902   void jump(ArrayAddress entry);
 903 
 904   // Floating
 905 
 906   void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
 907   void andpd(XMMRegister dst, AddressLiteral src);
 908   void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
 909 
 910   void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
 911   void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
 912   void andps(XMMRegister dst, AddressLiteral src);
 913 
 914   void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
 915   void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
 916   void comiss(XMMRegister dst, AddressLiteral src);
 917 
 918   void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
 919   void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
 920   void comisd(XMMRegister dst, AddressLiteral src);
 921 
 922   void fadd_s(Address src)        { Assembler::fadd_s(src); }
 923   void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
 924 
 925   void fldcw(Address src) { Assembler::fldcw(src); }
 926   void fldcw(AddressLiteral src);
 927 
 928   void fld_s(int index)   { Assembler::fld_s(index); }
 929   void fld_s(Address src) { Assembler::fld_s(src); }
 930   void fld_s(AddressLiteral src);
 931 
 932   void fld_d(Address src) { Assembler::fld_d(src); }
 933   void fld_d(AddressLiteral src);
 934 
 935   void fld_x(Address src) { Assembler::fld_x(src); }
 936   void fld_x(AddressLiteral src);
 937 
 938   void fmul_s(Address src)        { Assembler::fmul_s(src); }
 939   void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
 940 
 941   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
 942   void ldmxcsr(AddressLiteral src);
 943 
 944 #ifdef _LP64
 945  private:
 946   void sha256_AVX2_one_round_compute(
 947     Register  reg_old_h,
 948     Register  reg_a,
 949     Register  reg_b,
 950     Register  reg_c,
 951     Register  reg_d,
 952     Register  reg_e,
 953     Register  reg_f,
 954     Register  reg_g,
 955     Register  reg_h,
 956     int iter);
 957   void sha256_AVX2_four_rounds_compute_first(int start);
 958   void sha256_AVX2_four_rounds_compute_last(int start);
 959   void sha256_AVX2_one_round_and_sched(
 960         XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
 961         XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
 962         XMMRegister xmm_2,     /* ymm6 */
 963         XMMRegister xmm_3,     /* ymm7 */
 964         Register    reg_a,      /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
 965         Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
 966         Register    reg_c,      /* edi */
 967         Register    reg_d,      /* esi */
 968         Register    reg_e,      /* r8d */
 969         Register    reg_f,      /* r9d */
 970         Register    reg_g,      /* r10d */
 971         Register    reg_h,      /* r11d */
 972         int iter);
 973 
 974   void addm(int disp, Register r1, Register r2);
 975 
 976  public:
 977   void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 978                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 979                    Register buf, Register state, Register ofs, Register limit, Register rsp,
 980                    bool multi_block, XMMRegister shuf_mask);
 981 #endif
 982 
 983 #ifdef _LP64
 984  private:
 985   void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
 986                                      Register e, Register f, Register g, Register h, int iteration);
 987 
 988   void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
 989                                           Register a, Register b, Register c, Register d, Register e, Register f,
 990                                           Register g, Register h, int iteration);
 991 
 992   void addmq(int disp, Register r1, Register r2);
 993  public:
 994   void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
 995                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
 996                    Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
 997                    XMMRegister shuf_mask);
 998 #endif
 999 
1000   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1001                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1002                  Register buf, Register state, Register ofs, Register limit, Register rsp,
1003                  bool multi_block);
1004 
1005 #ifdef _LP64
1006   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1007                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1008                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1009                    bool multi_block, XMMRegister shuf_mask);
1010 #else
1011   void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1012                    XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1013                    Register buf, Register state, Register ofs, Register limit, Register rsp,
1014                    bool multi_block);
1015 #endif
1016 
1017   void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1018                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1019                 Register rax, Register rcx, Register rdx, Register tmp);
1020 
1021 #ifdef _LP64
1022   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1023                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1024                 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
1025 
1026   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1027                   XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1028                   Register rax, Register rcx, Register rdx, Register r11);
1029 
1030   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1031                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1032                 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
1033 
1034   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1035                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1036                 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
1037                 Register tmp3, Register tmp4);
1038 
1039   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1040                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1041                 Register rax, Register rcx, Register rdx, Register tmp1,
1042                 Register tmp2, Register tmp3, Register tmp4);
1043   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1044                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1045                 Register rax, Register rcx, Register rdx, Register tmp1,
1046                 Register tmp2, Register tmp3, Register tmp4);
1047 #else
1048   void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1049                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1050                 Register rax, Register rcx, Register rdx, Register tmp1);
1051 
1052   void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1053                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1054                 Register rax, Register rcx, Register rdx, Register tmp);
1055 
1056   void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1057                 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1058                 Register rdx, Register tmp);
1059 
1060   void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1061                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1062                 Register rax, Register rbx, Register rdx);
1063 
1064   void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1065                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1066                 Register rax, Register rcx, Register rdx, Register tmp);
1067 
1068   void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1069                         Register edx, Register ebx, Register esi, Register edi,
1070                         Register ebp, Register esp);
1071 
1072   void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1073                          Register esi, Register edi, Register ebp, Register esp);
1074 
1075   void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1076                         Register edx, Register ebx, Register esi, Register edi,
1077                         Register ebp, Register esp);
1078 
1079   void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1080                 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1081                 Register rax, Register rcx, Register rdx, Register tmp);
1082 #endif
1083 
1084   void increase_precision();
1085   void restore_precision();
1086 
1087 private:
1088 
1089   // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address     dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address     dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }
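  // The pclmulqdq selector byte picks which 64-bit halves are multiplied:
  // bit 0 selects the low/high qword of dst, bit 4 the low/high qword of src,
  // so 0x00 multiplies the two low qwords and 0x11 the two high qwords.
  // A sketch of how the two wrappers pair up in a folding step (register
  // names are illustrative only):
  //   pclmulldq(xmm1, xmm7);   // xmm1 = xmm1[63:0]   clmul xmm7[63:0]
  //   pclmulhdq(xmm2, xmm7);   // xmm2 = xmm2[127:64] clmul xmm7[127:64]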

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);

  // AVX 3-operand instructions
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpbroadcastw(XMMRegister dst, XMMRegister src);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; 128-bit works on AVX1
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; 128-bit works on AVX1
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
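  // Usage sketch (registers and the AVX_256bit length constant are
  // illustrative): on AVX2+ hardware
  //   vpxor(xmm0, xmm1, xmm2, Assembler::AVX_256bit);
  // emits the integer vpxor, while on AVX1-only hardware the same call falls
  // back to vxorpd, which produces the identical bit pattern for a plain XOR.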

  // Simple versions for AVX2 256-bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); } // 'true' promotes to vector_len 1, i.e. 256-bit
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
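  // These wrappers pick the best available encoding: the EVEX forms
  // vinserti32x4/vextracti32x4 on AVX-512 (UseAVX > 2), the AVX2 integer
  // forms otherwise, and the AVX1 floating-point forms as a last resort
  // (the 128-bit lane moved is the same bit pattern in all three cases).
  // E.g. (registers illustrative):
  //   vextracti128(xmm1, xmm2, 1);   // xmm1 = upper 128 bits of ymm2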

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
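  // Typical use is a lane reduction, e.g. folding the upper half of a 256-bit
  // accumulator into its lower half (register names illustrative):
  //   vextracti128_high(xmm1, xmm0);                   // xmm1 = ymm0[255:128]
  //   vpxor(xmm0, xmm0, xmm1, Assembler::AVX_128bit);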

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }

  // Data

  void cmov32( Condition cc, Register dst, Address  src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
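  // cmovptr expands to cmovq on LP64 and to cmov32 on 32-bit, letting callers
  // conditionally move pointer-sized values without an #ifdef at each site.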

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }
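  // Sketch of the RegisterOrConstant overload (values illustrative, and the
  // assumed constructors are the usual intptr_t/Register ones): the constant
  // case folds to an immediate move, the register case to a register move:
  //   movptr(rax, RegisterOrConstant((intptr_t) 16));  // == movptr(rax, 16)
  //   movptr(rax, RegisterOrConstant(rbx));            // == movptr(rax, rbx)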

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used. They
  // are dangerous.

  // They exist only on LP64 so that int32_t and intptr_t are distinct types
  // and we avoid ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign-extend a 32-bit (l) value to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
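  // E.g. movl2ptr(rcx, rdx) emits movslq (sign-extending 32 to 64 bits) on
  // LP64 and, when the registers differ, a plain movl on 32-bit.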

  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);

  // Clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop.
  void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);

#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through the stack if they cross a page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through the stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2,  Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: we don't need to load through the stack;
  // just check the string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for a non-ASCII character (negative byte value) in a byte array;
  // return true if any is found and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
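  // The folding routines implement the usual PCLMULQDQ-based CRC reduction:
  // with precomputed constants K = x^n mod P(x), a 128-bit chunk C is folded
  // into the running value roughly as
  //   crc' = clmul(C_hi, K_hi) ^ clmul(C_lo, K_lo) ^ next_chunk
  // where clmul is a carry-less multiply; the exact constants are supplied by
  // the generated stub code.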

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

  void save_vector_registers();
  void restore_vector_registers();
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being emitted that
 * jumps around any code generated between the creation of the instance and
 * its automatic destruction at the end of the enclosing scope, depending on
 * the value of the flag passed to the constructor, which is checked at
 * run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
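
// Usage sketch (the flag name is illustrative): emit probe code that is
// jumped over at run-time whenever the flag equals the given value:
//   {
//     SkipIfEqual skip(masm, &DTraceAllocProbes, false);
//     // ... probe code, skipped when *flag_addr == false ...
//   }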

#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP