/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#include "crc32c.h"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf */
};

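// Editor's note (illustrative, not in the original): reverse[] maps an x86
// condition code to its negation, so a caller can invert a branch by
// indexing with the original condition, e.g.
//
//   jcc(reverse[Assembler::less], not_less);  // taken when !(a < b)
//
// since reverse[Assembler::less] (0xc) is Assembler::greaterEqual (0xd).
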
// Implementation of MacroAssembler

// First, all the routines that have distinct 32-bit and 64-bit versions,
// unless the difference is trivial (a line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}
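
// Editor's note (hedged): extend_sign() prepares rdx:rax for a 32-bit idivl.
// For example, lo = rax = -7 leaves hi = rdx = 0xFFFFFFFF (all sign bits);
// on P6 and later the single cdql instruction performs the same extension.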

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}
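
// Editor's sketch (not in the original): lcmp2int implements Java's long
// compare, leaving -1, 0 or +1 in x_hi, e.g.
//   x = 0x00000001_00000000, y = 0x00000000_FFFFFFFF  =>  x_hi = +1
//   x == y                                            =>  x_hi =  0
// The high words are compared signed, but the low words use the unsigned
// condition (Assembler::below) because they are the unsigned lower halves
// of signed 64-bit values.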

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
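
// Worked example (editor's sketch): with x = 0x00000002_00000003 and
// y = 0x00000004_00000005,
//   lo(result) = lo(3 * 5)                         = 15
//   hi(result) = hi(3 * 5) + lo(2 * 5) + lo(3 * 4) = 0 + 10 + 12 = 22
// which matches the low 64 bits of the full product; the x_hi * y_hi term
// only affects bits 64 and above and so is never computed.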

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}
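
// Editor's note (hedged): for a shift count s >= 32 the code above first
// moves lo into hi and zeroes lo (the "x << n" step); the final shldl/shll
// then shift by s mod 32, which equals s - 32 because the hardware masks
// the count in cl. E.g. s = 40 becomes a word move plus a shift by 8.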

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  // scratch register is not used,
  // it is defined to match parameters of 64-bit version of this method.
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}


void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}

void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}

void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}

void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}

void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj) {
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  // Don't assert holding the ttyLock
  assert(false, "DEBUG MESSAGE: %s", msg);
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}
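
// Editor's note (hedged): on Win64 the caller must reserve a 32-byte "shadow
// space" above the return address in which the callee may spill its four
// register arguments; that is what frame::arg_reg_save_area_bytes reserves
// here, even if the callee never touches it.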

void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271.  The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
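
// Editor's note (hedged): the special case above exists because the hardware
// idivq raises a divide error (#DE) when the quotient does not fit, and
// min_long / -1 would be +2^63, which overflows. Java instead defines
//   Long.MIN_VALUE / -1 == Long.MIN_VALUE  and  Long.MIN_VALUE % -1 == 0,
// which is exactly what the branch around the idivq produces.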

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementq(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}
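
// Editor's note (hedged): the min_jint checks above keep the value from being
// negated, since -min_jint overflows a 32-bit int (it wraps back to min_jint);
// such a value is handled directly with addq/subq rather than being
// re-dispatched through the opposite-signed helper with -value.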

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(scratch, src);
      movq(dst, Address(scratch, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}

void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushklass(Metadata* obj) {
  mov_metadata(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
      assert(false, "start up GDB");
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, "DEBUG MESSAGE: %s", msg);
  }
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    addss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}
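
// Illustrative sketch (editor's note): align(16) pads with nops until the
// current code offset is a multiple of 16; e.g. at offset() == 13 it emits
// 3 bytes of nop so the next instruction starts on a 16-byte boundary.
// align(modulus, target) performs the same padding for a future position.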

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andps(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incl(Address(scr, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incq(Address(scr, 0));
  }
}
#endif

// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < StackShadowPages; i++) {
    // this could be any sized move but it can serve as a debugging crumb,
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
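
// Worked example (editor's sketch): with size = 24K and a 4K page, the loop
// above touches one word on each of the six pages below rsp, and the
// trailing for-loop touches StackShadowPages - 1 further pages below the
// last banged address, so a stack overflow in that region faults here,
// eagerly, rather than somewhere unpredictable later.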

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  if (PrintBiasedLockingStatistics && counters == NULL) {
    counters = BiasedLocking::counters();
  }
  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movptr(swap_reg, mark_addr);
  }
  movptr(tmp_reg, swap_reg);
  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
#ifndef _LP64
  // Note that because there is no current thread register on x86_32 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movptr(saved_mark_addr, swap_reg);
#endif
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
  xorptr(tmp_reg, swap_reg);
  Register header_reg = tmp_reg;
#else
  xorptr(tmp_reg, swap_reg);
  get_thread(swap_reg);
  xorptr(swap_reg, tmp_reg);
  Register header_reg = swap_reg;
#endif
  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
  jccb(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testptr(header_reg, markOopDesc::epoch_mask_in_place);
  jccb(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  andptr(swap_reg,
         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
#ifdef _LP64
  movptr(tmp_reg, swap_reg);
  orptr(tmp_reg, r15_thread);
#else
  get_thread(tmp_reg);
  orptr(tmp_reg, swap_reg);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
#else
  get_thread(swap_reg);
  orptr(tmp_reg, swap_reg);
  movptr(swap_reg, saved_mark_addr);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  load_prototype_header(tmp_reg, obj_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}

#ifdef COMPILER2

#if INCLUDE_RTM_OPT

// Update rtm_counters based on abort status
// input: abort_status
//        rtm_counters (RTMLockingCounters*)
// flags are killed
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {

  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
  if (PrintPreciseRTMLockingStatistics) {
    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
      Label check_abort;
      testl(abort_status, (1<<i));
      jccb(Assembler::equal, check_abort);
      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
      bind(check_abort);
    }
  }
}

// Branch if ((random & (count-1)) != 0), count is 2^n
// tmp, scr and flags are killed
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
  assert(tmp == rax, "");
  assert(scr == rdx, "");
  rdtsc(); // modifies EDX:EAX
  andptr(tmp, count-1);
  jccb(Assembler::notZero, brLabel);
}
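
// Editor's note (hedged): the low bits of the time-stamp counter serve as a
// cheap pseudo-random source here. With count a power of two,
// (tsc & (count-1)) is nonzero roughly (count-1)/count of the time, so the
// caller's fall-through path (e.g. a statistics increment) runs only about
// once per count calls.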

// Perform abort ratio calculation, set no_rtm bit if high ratio
// input:  rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
                                                 Register rtm_counters_Reg,
                                                 RTMLockingCounters* rtm_counters,
                                                 Metadata* method_data) {
  Label L_done, L_check_always_rtm1, L_check_always_rtm2;

  if (RTMLockingCalculationDelay > 0) {
    // Delay calculation
    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
    testptr(tmpReg, tmpReg);
    jccb(Assembler::equal, L_done);
  }
  // Abort ratio calculation only if abort_count > RTMAbortThreshold
  //   Aborted transactions = abort_count * 100
  //   All transactions = total_count *  RTMTotalCountIncrRate
  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)

  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
  cmpptr(tmpReg, RTMAbortThreshold);
  jccb(Assembler::below, L_check_always_rtm2);
  imulptr(tmpReg, tmpReg, 100);

  Register scrReg = rtm_counters_Reg;
  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
  imulptr(scrReg, scrReg, RTMAbortRatio);
  cmpptr(tmpReg, scrReg);
  jccb(Assembler::below, L_check_always_rtm1);
  if (method_data != NULL) {
    // set rtm_state to "no rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
  }
  jmpb(L_done);
  bind(L_check_always_rtm1);
  // Reload RTMLockingCounters* address
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  bind(L_check_always_rtm2);
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
  jccb(Assembler::below, L_done);
  if (method_data != NULL) {
    // set rtm_state to "always rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
  }
  bind(L_done);
}
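
// Worked example (editor's sketch) of the ratio test above: with
// abort_count = 1000 (>= RTMAbortThreshold), total_count = 2000,
// RTMTotalCountIncrRate = 1 and RTMAbortRatio = 50, the code compares
// 1000 * 100 against 2000 * 1 * 50; since 100000 is not below 100000, the
// aborted fraction has reached 50% and the no_rtm bit is set in the MDO.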

// Update counters and perform abort ratio calculation
// input:  abort_status_Reg
// rtm_counters_Reg, flags are killed
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
                                   Register rtm_counters_Reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data,
                                   bool profile_rtm) {

  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
  // update rtm counters based on rax value at abort
  // reads abort_status_Reg, updates flags
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
  if (profile_rtm) {
    // Save abort status because abort_status_Reg is used by following code.
    if (RTMRetryCount > 0) {
      push(abort_status_Reg);
    }
    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
    // restore abort status
    if (RTMRetryCount > 0) {
      pop(abort_status_Reg);
    }
  }
}

// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
//       : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
  Label doneRetry;
  assert(abort_status_Reg == rax, "");
  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
  // if reason is in 0x6 and retry count != 0 then retry
  andptr(abort_status_Reg, 0x6);
  jccb(Assembler::zero, doneRetry);
  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  pause();
  decrementl(retry_count_Reg);
  jmp(retryLabel);
  bind(doneRetry);
}

// Spin and retry if lock is busy,
// inputs: box_Reg (monitor address)
//       : retry_count_Reg
// output: retry_count_Reg decremented by 1
//       : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
  Label SpinLoop, SpinExit, doneRetry;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  decrementl(retry_count_Reg);
  movptr(scr_Reg, RTMSpinLoopCount);

  bind(SpinLoop);
  pause();
  decrementl(scr_Reg);
  jccb(Assembler::lessEqual, SpinExit);
  movptr(tmp_Reg, Address(box_Reg, owner_offset));
  testptr(tmp_Reg, tmp_Reg);
  jccb(Assembler::notZero, SpinLoop);

  bind(SpinExit);
  jmp(retryLabel);
  bind(doneRetry);
  incrementl(retry_count_Reg); // clear z flag
}
1460 
1461 // Use RTM for normal stack locks
1462 // Input: objReg (object to lock)
1463 void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
1464                                        Register retry_on_abort_count_Reg,
1465                                        RTMLockingCounters* stack_rtm_counters,
1466                                        Metadata* method_data, bool profile_rtm,
1467                                        Label& DONE_LABEL, Label& IsInflated) {
1468   assert(UseRTMForStackLocks, "why call this otherwise?");
1469   assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1470   assert(tmpReg == rax, "");
1471   assert(scrReg == rdx, "");
1472   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1473 
1474   if (RTMRetryCount > 0) {
1475     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1476     bind(L_rtm_retry);
1477   }
1478   movptr(tmpReg, Address(objReg, 0));
1479   testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
1480   jcc(Assembler::notZero, IsInflated);
1481 
1482   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1483     Label L_noincrement;
1484     if (RTMTotalCountIncrRate > 1) {
1485       // tmpReg, scrReg and flags are killed
1486       branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1487     }
1488     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1489     atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1490     bind(L_noincrement);
1491   }
1492   xbegin(L_on_abort);
1493   movptr(tmpReg, Address(objReg, 0));       // fetch markword
1494   andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1495   cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1496   jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
1497 
1498   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1499   if (UseRTMXendForLockBusy) {
1500     xend();
1501     movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
1502     jmp(L_decrement_retry);
1503   }
1504   else {
1505     xabort(0);
1506   }
1507   bind(L_on_abort);
1508   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1509     rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
1510   }
1511   bind(L_decrement_retry);
1512   if (RTMRetryCount > 0) {
1513     // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1514     rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1515   }
1516 }
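     // The shape of the transactional stack-lock attempt above, as a C-like
     // sketch (illustration only, not emitted code):
     //
     //   if (is_inflated(mark(obj))) goto IsInflated;
     //   xbegin(L_on_abort);                         // start transaction
     //   if (lock_bits(mark(obj)) == unlocked_value) goto DONE;  // elided
     //   // otherwise the object is locked: either commit (xend) and treat
     //   // it as 'lock busy', or xabort(0) and land on L_on_abort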
1517 
1518 // Use RTM for inflated locks
1519 // inputs: objReg (object to lock)
1520 //         boxReg (on-stack box address (displaced header location) - KILLED)
1521 //         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
1522 void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
1523                                           Register scrReg, Register retry_on_busy_count_Reg,
1524                                           Register retry_on_abort_count_Reg,
1525                                           RTMLockingCounters* rtm_counters,
1526                                           Metadata* method_data, bool profile_rtm,
1527                                           Label& DONE_LABEL) {
1528   assert(UseRTMLocking, "why call this otherwise?");
1529   assert(tmpReg == rax, "");
1530   assert(scrReg == rdx, "");
1531   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1532   int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1533 
1534   // Without the cast to int32_t, movptr will destroy r10, which typically holds obj.
1535   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1536   movptr(boxReg, tmpReg); // Save ObjectMonitor address
1537 
1538   if (RTMRetryCount > 0) {
1539     movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
1540     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1541     bind(L_rtm_retry);
1542   }
1543   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1544     Label L_noincrement;
1545     if (RTMTotalCountIncrRate > 1) {
1546       // tmpReg, scrReg and flags are killed
1547       branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1548     }
1549     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1550     atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1551     bind(L_noincrement);
1552   }
1553   xbegin(L_on_abort);
1554   movptr(tmpReg, Address(objReg, 0));
1555   movptr(tmpReg, Address(tmpReg, owner_offset));
1556   testptr(tmpReg, tmpReg);
1557   jcc(Assembler::zero, DONE_LABEL);
1558   if (UseRTMXendForLockBusy) {
1559     xend();
1560     jmp(L_decrement_retry);
1561   }
1562   else {
1563     xabort(0);
1564   }
1565   bind(L_on_abort);
1566   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1567   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1568     rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
1569   }
1570   if (RTMRetryCount > 0) {
1571     // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1572     rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1573   }
1574 
1575   movptr(tmpReg, Address(boxReg, owner_offset));
1576   testptr(tmpReg, tmpReg);
1577   jccb(Assembler::notZero, L_decrement_retry);
1578 
1579   // Appears unlocked - try to swing _owner from null to non-null.
1580   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1581 #ifdef _LP64
1582   Register threadReg = r15_thread;
1583 #else
1584   get_thread(scrReg);
1585   Register threadReg = scrReg;
1586 #endif
1587   if (os::is_MP()) {
1588     lock();
1589   }
1590   cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
1591 
1592   if (RTMRetryCount > 0) {
1593     // if the CAS succeeded we are done, else retry
1594     jccb(Assembler::equal, DONE_LABEL) ;
1595     bind(L_decrement_retry);
1596     // Spin and retry if lock is busy.
1597     rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1598   }
1599   else {
1600     bind(L_decrement_retry);
1601   }
1602 }
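     // In outline, the above emits (C-like sketch; 'm' is the ObjectMonitor):
     //
     //   box->dhw = unused_mark();          // non-0, so Fast_Unlock is not fooled
     //   xbegin(L_on_abort);
     //   if (m->owner == NULL) goto DONE;   // lock elided in a transaction
     //   // owner != NULL: leave the transaction (xend or xabort), retry on
     //   // abort/busy while budget remains, else fall back to a CAS of the
     //   // current thread into m->owner.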
1603 
1604 #endif //  INCLUDE_RTM_OPT
1605 
1606 // Fast_Lock and Fast_Unlock used by C2
1607 
1608 // Because the transitions from emitted code to the runtime
1609 // monitorenter/exit helper stubs are so slow it's critical that
1610 // we inline both the stack-locking fast-path and the inflated fast path.
1611 //
1612 // See also: cmpFastLock and cmpFastUnlock.
1613 //
1614 // What follows is a specialized inline transliteration of the code
1615 // in slow_enter() and slow_exit().  If we're concerned about I$ bloat
1616 // another option would be to emit TrySlowEnter and TrySlowExit methods
1617 // at startup-time.  These methods would accept arguments as
1618 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1619 // indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
1620 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1621 // In practice, however, the # of lock sites is bounded and is usually small.
1622 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1623 // if the processor uses simple bimodal branch predictors keyed by EIP,
1624 // since the helper routines would be called from multiple synchronization
1625 // sites.
1626 //
1627 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1628 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1629 // to those specialized methods.  That'd give us a mostly platform-independent
1630 // implementation that the JITs could optimize and inline at their pleasure.
1631 // Done correctly, the only time we'd need to cross over into native code would be
1632 // to park() or unpark() threads.  We'd also need a few more unsafe operators
1633 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1634 // (b) explicit barriers or fence operations.
1635 //
1636 // TODO:
1637 //
1638 // *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
1639 //    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
1640 //    Given TLAB allocation, Self is usually manifested in a register, so passing it into
1641 //    the lock operators would typically be faster than reifying Self.
1642 //
1643 // *  Ideally I'd define the primitives as:
1644 //       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1645 //       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1646 //    Unfortunately ADLC bugs prevent us from expressing the ideal form.
1647 //    Instead, we're stuck with the rather awkward and brittle register assignments below.
1648 //    Furthermore the register assignments are overconstrained, possibly resulting in
1649 //    sub-optimal code near the synchronization site.
1650 //
1651 // *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
1652 //    Alternately, use a better sp-proximity test.
1653 //
1654 // *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1655 //    Either one is sufficient to uniquely identify a thread.
1656 //    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1657 //
1658 // *  Intrinsify notify() and notifyAll() for the common cases where the
1659 //    object is locked by the calling thread but the waitlist is empty.
1660 //    This would avoid the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
1661 //
1662 // *  use jccb and jmpb instead of jcc and jmp to improve code density.
1663 //    But beware of excessive branch density on AMD Opterons.
1664 //
1665 // *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
1666 //    or failure of the fast-path.  If the fast-path fails then we pass
1667 //    control to the slow-path, typically in C.  In Fast_Lock and
1668 //    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
1669 //    will emit a conditional branch immediately after the node.
1670 //    So we have branches to branches and lots of ICC.ZF games.
1671 //    Instead, it might be better to have C2 pass a "FailureLabel"
1672 //    into Fast_Lock and Fast_Unlock.  In the case of success, control
1673 //    will drop through the node.  ICC.ZF is undefined at exit.
1674 //    In the case of failure, the node will branch directly to the
1675 //    FailureLabel
1676 
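     // For illustration, the contract as C2 consumes it today (a sketch of
     // the generated shape, not code emitted by this file):
     //
     //   fast_lock(...);         // leaves ICC.ZF = 1 on success, 0 on failure
     //   jne   slow_path_stub;   // C2's conditional branch on ZF
     //   ...                     // fall through: fast-path lock acquired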
1677 
1678 // obj: object to lock
1679 // box: on-stack box address (displaced header location) - KILLED
1680 // rax,: tmp -- KILLED
1681 // scr: tmp -- KILLED
1682 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
1683                                Register scrReg, Register cx1Reg, Register cx2Reg,
1684                                BiasedLockingCounters* counters,
1685                                RTMLockingCounters* rtm_counters,
1686                                RTMLockingCounters* stack_rtm_counters,
1687                                Metadata* method_data,
1688                                bool use_rtm, bool profile_rtm) {
1689   // Ensure the register assignments are disjoint
1690   assert(tmpReg == rax, "");
1691 
1692   if (use_rtm) {
1693     assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
1694   } else {
1695     assert(cx1Reg == noreg, "");
1696     assert(cx2Reg == noreg, "");
1697     assert_different_registers(objReg, boxReg, tmpReg, scrReg);
1698   }
1699 
1700   if (counters != NULL) {
1701     atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
1702   }
1703   if (EmitSync & 1) {
1704       // set box->dhw = markOopDesc::unused_mark()
1705       // Force all sync thru slow-path: slow_enter() and slow_exit()
1706       movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1707       cmpptr (rsp, (int32_t)NULL_WORD);
1708   } else {
1709     // Possible cases that we'll encounter in fast_lock
1710     // ------------------------------------------------
1711     // * Inflated
1712     //    -- unlocked
1713     //    -- Locked
1714     //       = by self
1715     //       = by other
1716     // * biased
1717     //    -- by Self
1718     //    -- by other
1719     // * neutral
1720     // * stack-locked
1721     //    -- by self
1722     //       = sp-proximity test hits
1723     //       = sp-proximity test generates false-negative
1724     //    -- by other
1725     //
1726 
1727     Label IsInflated, DONE_LABEL;
1728 
1729     // it's stack-locked, biased or neutral
1730     // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
1731     // order to reduce the number of conditional branches in the most common cases.
1732     // Beware -- there's a subtle invariant that fetch of the markword
1733     // at [FETCH], below, will never observe a biased encoding (*101b).
1734     // If this invariant is not held we risk exclusion (safety) failure.
1735     if (UseBiasedLocking && !UseOptoBiasInlining) {
1736       biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1737     }
1738 
1739 #if INCLUDE_RTM_OPT
1740     if (UseRTMForStackLocks && use_rtm) {
1741       rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
1742                         stack_rtm_counters, method_data, profile_rtm,
1743                         DONE_LABEL, IsInflated);
1744     }
1745 #endif // INCLUDE_RTM_OPT
1746 
1747     movptr(tmpReg, Address(objReg, 0));          // [FETCH]
1748     testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1749     jccb(Assembler::notZero, IsInflated);
1750 
1751     // Attempt stack-locking ...
1752     orptr (tmpReg, markOopDesc::unlocked_value);
1753     movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
1754     if (os::is_MP()) {
1755       lock();
1756     }
1757     cmpxchgptr(boxReg, Address(objReg, 0));      // Updates tmpReg
1758     if (counters != NULL) {
1759       cond_inc32(Assembler::equal,
1760                  ExternalAddress((address)counters->fast_path_entry_count_addr()));
1761     }
1762     jcc(Assembler::equal, DONE_LABEL);           // Success
1763 
1764     // Recursive locking.
1765     // The object is stack-locked: markword contains stack pointer to BasicLock.
1766     // Locked by current thread if difference with current SP is less than one page.
1767     subptr(tmpReg, rsp);
1768     // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
1769     andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
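         // Illustration, assuming a 4K page on LP64: the mask is
         // (int32_t)(7 - 4096) == 0xFFFFF007, which keeps the low alignment
         // bits plus bits 12 and up.  ZF == 1 therefore means the difference
         // is non-negative, below one page, and suitably aligned -- i.e. the
         // stack-lock lives in our own frame (a recursive lock).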
1770     movptr(Address(boxReg, 0), tmpReg);
1771     if (counters != NULL) {
1772       cond_inc32(Assembler::equal,
1773                  ExternalAddress((address)counters->fast_path_entry_count_addr()));
1774     }
1775     jmp(DONE_LABEL);
1776 
1777     bind(IsInflated);
1778     // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
1779 
1780 #if INCLUDE_RTM_OPT
1781     // Use the same RTM locking code in 32- and 64-bit VM.
1782     if (use_rtm) {
1783       rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1784                            rtm_counters, method_data, profile_rtm, DONE_LABEL);
1785     } else {
1786 #endif // INCLUDE_RTM_OPT
1787 
1788 #ifndef _LP64
1789     // The object is inflated.
1790 
1791     // boxReg refers to the on-stack BasicLock in the current frame.
1792     // We'd like to write:
1793     //   set box->_displaced_header = markOopDesc::unused_mark().  Any non-0 value suffices.
1794     // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
1795     // additional latency as we have another ST in the store buffer that must drain.
1796 
1797     if (EmitSync & 8192) {
1798        movptr(Address(boxReg, 0), 3);            // results in ST-before-CAS penalty
1799        get_thread (scrReg);
1800        movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2]
1801        movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
1802        if (os::is_MP()) {
1803          lock();
1804        }
1805        cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1806     } else
1807     if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
1808        // register juggle because we need tmpReg for cmpxchgptr below
1809        movptr(scrReg, boxReg);
1810        movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
1811 
1812        // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1813        if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1814           // prefetchw [eax + Offset(_owner)-2]
1815           prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1816        }
1817 
1818        if ((EmitSync & 64) == 0) {
1819          // Optimistic form: consider XORL tmpReg,tmpReg
1820          movptr(tmpReg, NULL_WORD);
1821        } else {
1822          // Can suffer RTS->RTO upgrades on shared or cold $ lines
1823          // Test-And-CAS instead of CAS
1824          movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1825          testptr(tmpReg, tmpReg);                   // Locked ?
1826          jccb  (Assembler::notZero, DONE_LABEL);
1827        }
1828 
1829        // Appears unlocked - try to swing _owner from null to non-null.
1830        // Ideally, I'd manifest "Self" with get_thread and then attempt
1831        // to CAS the register containing Self into m->Owner.
1832        // But we don't have enough registers, so instead we can either try to CAS
1833        // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
1834        // we later store "Self" into m->Owner.  Transiently storing a stack address
1835        // (rsp or the address of the box) into  m->owner is harmless.
1836        // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1837        if (os::is_MP()) {
1838          lock();
1839        }
1840        cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1841        movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
1842        // If we weren't able to swing _owner from NULL to the BasicLock
1843        // then take the slow path.
1844        jccb  (Assembler::notZero, DONE_LABEL);
1845        // update _owner from BasicLock to thread
1846        get_thread (scrReg);                    // beware: clobbers ICCs
1847        movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
1848        xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
1849 
1850        // If the CAS fails we can either retry or pass control to the slow-path.
1851        // We use the latter tactic.
1852        // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1853        // If the CAS was successful ...
1854        //   Self has acquired the lock
1855        //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1856        // Intentional fall-through into DONE_LABEL ...
1857     } else {
1858        movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
1859        movptr(boxReg, tmpReg);
1860 
1861        // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1862        if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1863           // prefetchw [eax + Offset(_owner)-2]
1864           prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1865        }
1866 
1867        if ((EmitSync & 64) == 0) {
1868          // Optimistic form
1869          xorptr  (tmpReg, tmpReg);
1870        } else {
1871          // Can suffer RTS->RTO upgrades on shared or cold $ lines
1872          movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
1873          testptr(tmpReg, tmpReg);                   // Locked ?
1874          jccb  (Assembler::notZero, DONE_LABEL);
1875        }
1876 
1877        // Appears unlocked - try to swing _owner from null to non-null.
1878        // Use either "Self" (in scr) or rsp as thread identity in _owner.
1879        // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
1880        get_thread (scrReg);
1881        if (os::is_MP()) {
1882          lock();
1883        }
1884        cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1885 
1886        // If the CAS fails we can either retry or pass control to the slow-path.
1887        // We use the latter tactic.
1888        // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1889        // If the CAS was successful ...
1890        //   Self has acquired the lock
1891        //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1892        // Intentional fall-through into DONE_LABEL ...
1893     }
1894 #else // _LP64
1895     // It's inflated
1896     movq(scrReg, tmpReg);
1897     xorq(tmpReg, tmpReg);
1898 
1899     if (os::is_MP()) {
1900       lock();
1901     }
1902     cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1903     // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
1904     // Without the cast to int32_t, movptr will destroy r10, which typically holds obj.
1905     movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1906     // Intentional fall-through into DONE_LABEL ...
1907     // Propagate ICC.ZF from CAS above into DONE_LABEL.
1908 #endif // _LP64
1909 #if INCLUDE_RTM_OPT
1910     } // use_rtm()
1911 #endif
1912     // DONE_LABEL is a hot target - we'd really like to place it at the
1913     // start of cache line by padding with NOPs.
1914     // See the AMD and Intel software optimization manuals for the
1915     // most efficient "long" NOP encodings.
1916     // Unfortunately none of our alignment mechanisms suffice.
1917     bind(DONE_LABEL);
1918 
1919     // At DONE_LABEL the icc ZFlag is set as follows ...
1920     // Fast_Unlock uses the same protocol.
1921     // ZFlag == 1 -> Success
1922     // ZFlag == 0 -> Failure - force control through the slow-path
1923   }
1924 }
1925 
1926 // obj: object to unlock
1927 // box: box address (displaced header location), killed.  Must be EAX.
1928 // tmp: killed, cannot be obj nor box.
1929 //
1930 // Some commentary on balanced locking:
1931 //
1932 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
1933 // Methods that don't have provably balanced locking are forced to run in the
1934 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1935 // The interpreter provides two properties:
1936 // I1:  At return-time the interpreter automatically and quietly unlocks any
1937 //      objects acquired by the current activation (frame).  Recall that the
1938 //      interpreter maintains an on-stack list of locks currently held by
1939 //      a frame.
1940 // I2:  If a method attempts to unlock an object that is not held by
1941 //      the frame, the interpreter throws IMSX.
1942 //
1943 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
1944 // B() doesn't have provably balanced locking so it runs in the interpreter.
1945 // Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
1946 // is still locked by A().
1947 //
1948 // The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
1949 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
1950 // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
1951 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
1952 // Arguably, given that the spec legislates the JNI case as undefined, our implementation
1953 // could reasonably *avoid* checking owner in Fast_Unlock().
1954 // In the interest of performance we elide m->Owner==Self check in unlock.
1955 // A perfectly viable alternative is to elide the owner check except when
1956 // Xcheck:jni is enabled.
1957 
1958 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
1959   assert(boxReg == rax, "");
1960   assert_different_registers(objReg, boxReg, tmpReg);
1961 
1962   if (EmitSync & 4) {
1963     // Disable - inhibit all inlining.  Force control through the slow-path
1964     cmpptr (rsp, 0);
1965   } else {
1966     Label DONE_LABEL, Stacked, CheckSucc;
1967 
1968     // Critically, the biased locking test must have precedence over
1969     // and appear before the (box->dhw == 0) recursive stack-lock test.
1970     if (UseBiasedLocking && !UseOptoBiasInlining) {
1971        biased_locking_exit(objReg, tmpReg, DONE_LABEL);
1972     }
1973 
1974 #if INCLUDE_RTM_OPT
1975     if (UseRTMForStackLocks && use_rtm) {
1976       assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1977       Label L_regular_unlock;
1978       movptr(tmpReg, Address(objReg, 0));           // fetch markword
1979       andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1980       cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1981       jccb(Assembler::notEqual, L_regular_unlock);  // if !HLE RegularLock
1982       xend();                                       // otherwise end...
1983       jmp(DONE_LABEL);                              // ... and we're done
1984       bind(L_regular_unlock);
1985     }
1986 #endif
1987 
1988     cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
1989     jcc   (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
1990     movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
1991     testptr(tmpReg, markOopDesc::monitor_value);    // Inflated?
1992     jccb  (Assembler::zero, Stacked);
1993 
1994     // It's inflated.
1995 #if INCLUDE_RTM_OPT
1996     if (use_rtm) {
1997       Label L_regular_inflated_unlock;
1998       int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1999       movptr(boxReg, Address(tmpReg, owner_offset));
2000       testptr(boxReg, boxReg);
2001       jccb(Assembler::notZero, L_regular_inflated_unlock);
2002       xend();
2003       jmpb(DONE_LABEL);
2004       bind(L_regular_inflated_unlock);
2005     }
2006 #endif
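         // Sketch of the RTM unlock decision just made (illustration only):
         //
         //   if (m->owner == NULL) { xend(); goto DONE; }  // lock was elided
         //   /* else fall through to the regular inflated unlock below */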
2007 
2008     // Despite our balanced locking property we still check that m->_owner == Self
2009     // as java routines or native JNI code called by this thread might
2010     // have released the lock.
2011     // Refer to the comments in synchronizer.cpp for how we might encode extra
2012     // state in _succ so we can avoid fetching EntryList|cxq.
2013     //
2014     // I'd like to add more cases in fast_lock() and fast_unlock() --
2015     // such as recursive enter and exit -- but we have to be wary of
2016     // I$ bloat, T$ effects and BP$ effects.
2017     //
2018     // If there's no contention try a 1-0 exit.  That is, exit without
2019     // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
2020     // we detect and recover from the race that the 1-0 exit admits.
2021     //
2022     // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
2023     // before it STs null into _owner, releasing the lock.  Updates
2024     // to data protected by the critical section must be visible before
2025     // we drop the lock (and thus before any other thread could acquire
2026     // the lock and observe the fields protected by the lock).
2027     // IA32's memory model is TSO (total store order), so STs are ordered with respect to
2028     // each other and there's no need for an explicit barrier (fence).
2029     // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
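         // As a C-like sketch, the 1-0 exit protocol implemented below
         // (illustration only, not emitted code):
         //
         //   m->_owner = NULL;              // ST: release the lock
         //   membar_store_load();           // the Dekker pivot point
         //   if (m->_succ != NULL) return;  // a successor will take over
         //   if (CAS(&m->_owner, NULL, Self) != NULL) return;  // someone did
         //   // reacquired: we must ensure succession -> go to the slow path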
2030 #ifndef _LP64
2031     get_thread (boxReg);
2032     if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
2033       // prefetchw [ebx + Offset(_owner)-2]
2034       prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2035     }
2036 
2037     // Note that we could employ various encoding schemes to reduce
2038     // the number of loads below (currently 4) to just 2 or 3.
2039     // Refer to the comments in synchronizer.cpp.
2040     // In practice the chain of fetches doesn't seem to impact performance, however.
2041     xorptr(boxReg, boxReg);
2042     if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
2043        // Attempt to reduce branch density - AMD's branch predictor.
2044        orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2045        orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2046        orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2047        jccb  (Assembler::notZero, DONE_LABEL);
2048        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2049        jmpb  (DONE_LABEL);
2050     } else {
2051        orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2052        jccb  (Assembler::notZero, DONE_LABEL);
2053        movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2054        orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2055        jccb  (Assembler::notZero, CheckSucc);
2056        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2057        jmpb  (DONE_LABEL);
2058     }
2059 
2060     // The following code fragment (EmitSync & 65536) improves the performance of
2061     // contended applications and contended synchronization microbenchmarks.
2062     // Unfortunately the emission of the code - even though not executed - causes regressions
2063     // in scimark and jetstream, evidently because of $ effects.  Replacing the code
2064     // with an equal number of never-executed NOPs results in the same regression.
2065     // We leave it off by default.
2066 
2067     if ((EmitSync & 65536) != 0) {
2068        Label LSuccess, LGoSlowPath ;
2069 
2070        bind  (CheckSucc);
2071 
2072        // Optional pre-test ... it's safe to elide this
2073        cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2074        jccb(Assembler::zero, LGoSlowPath);
2075 
2076        // We have a classic Dekker-style idiom:
2077        //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
2078        // There are a number of ways to implement the barrier:
2079        // (1) lock:andl &m->_owner, 0
2080        //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
2081        //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
2082        //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
2083        // (2) If supported, an explicit MFENCE is appealing.
2084        //     In older IA32 processors MFENCE is slower than lock:add or xchg
2085        //     particularly if the write-buffer is full, as might be the case if
2086        //     stores closely precede the fence or fence-equivalent instruction.
2087        //     See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2088        //     as the situation has changed with Nehalem and Shanghai.
2089        // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
2090        //     The $lines underlying the top-of-stack should be in M-state.
2091        //     The locked add instruction is serializing, of course.
2092        // (4) Use xchg, which is serializing
2093        //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
2094        // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
2095        //     The integer condition codes will tell us if succ was 0.
2096        //     Since _succ and _owner should reside in the same $line and
2097        //     we just stored into _owner, it's likely that the $line
2098        //     remains in M-state for the lock:orl.
2099        //
2100        // We currently use (3), although it's likely that switching to (2)
2101        // is correct for the future.
2102 
2103        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2104        if (os::is_MP()) {
2105          lock(); addptr(Address(rsp, 0), 0);
2106        }
2107        // Ratify _succ remains non-null
2108        cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
2109        jccb  (Assembler::notZero, LSuccess);
2110 
2111        xorptr(boxReg, boxReg);                  // box is really EAX
2112        if (os::is_MP()) { lock(); }
2113        cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2114        // There's no successor so we tried to regrab the lock with the
2115        // placeholder value. If that didn't work, then another thread
2116        // grabbed the lock so we're done (and exit was a success).
2117        jccb  (Assembler::notEqual, LSuccess);
2118        // Since we're low on registers we installed rsp as a placeholder in _owner.
2119        // Now install Self over rsp.  This is safe as we're transitioning from
2120        // non-null to non-null.
2121        get_thread (boxReg);
2122        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
2123        // Intentional fall-through into LGoSlowPath ...
2124 
2125        bind  (LGoSlowPath);
2126        orptr(boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2127        jmpb  (DONE_LABEL);
2128 
2129        bind  (LSuccess);
2130        xorptr(boxReg, boxReg);                 // set ICC.ZF=1 to indicate success
2131        jmpb  (DONE_LABEL);
2132     }
2133 
2134     bind (Stacked);
2135     // It's not inflated and it's not recursively stack-locked and it's not biased.
2136     // It must be stack-locked.
2137     // Try to reset the header to displaced header.
2138     // The "box" value on the stack is stable, so we can reload
2139     // and be assured we observe the same value as above.
2140     movptr(tmpReg, Address(boxReg, 0));
2141     if (os::is_MP()) {
2142       lock();
2143     }
2144     cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2145     // Intentional fall-thru into DONE_LABEL
2146 
2147     // DONE_LABEL is a hot target - we'd really like to place it at the
2148     // start of cache line by padding with NOPs.
2149     // See the AMD and Intel software optimization manuals for the
2150     // most efficient "long" NOP encodings.
2151     // Unfortunately none of our alignment mechanisms suffice.
2152     if ((EmitSync & 65536) == 0) {
2153        bind (CheckSucc);
2154     }
2155 #else // _LP64
2156     // It's inflated
2157     if (EmitSync & 1024) {
2158       // Emit code to check that _owner == Self
2159       // We could fold the _owner test into subsequent code more efficiently
2160       // than using a stand-alone check, but since _owner checking is off by
2161       // default we don't bother. We also might consider predicating the
2162       // _owner==Self check on Xcheck:jni or running on a debug build.
2163       movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2164       xorptr(boxReg, r15_thread);
2165     } else {
2166       xorptr(boxReg, boxReg);
2167     }
2168     orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2169     jccb  (Assembler::notZero, DONE_LABEL);
2170     movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2171     orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2172     jccb  (Assembler::notZero, CheckSucc);
2173     movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2174     jmpb  (DONE_LABEL);
2175 
2176     if ((EmitSync & 65536) == 0) {
2177       // Try to avoid passing control into the slow_path ...
2178       Label LSuccess, LGoSlowPath ;
2179       bind  (CheckSucc);
2180 
2181       // The following optional optimization can be elided if necessary
2182       // Effectively: if (succ == null) goto SlowPath
2183       // The code reduces the window for a race, however,
2184       // and thus benefits performance.
2185       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2186       jccb  (Assembler::zero, LGoSlowPath);
2187 
2188       if ((EmitSync & 16) && os::is_MP()) {
2189         orptr(boxReg, boxReg);
2190         xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2191       } else {
2192         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2193         if (os::is_MP()) {
2194           // Memory barrier/fence
2195           // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
2196           // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
2197           // This is faster on Nehalem and AMD Shanghai/Barcelona.
2198           // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2199           // We might also restructure (ST Owner=0;barrier;LD _Succ) to
2200           // (mov box,0; xchgq box, &m->Owner; LD _succ) .
2201           lock(); addl(Address(rsp, 0), 0);
2202         }
2203       }
2204       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2205       jccb  (Assembler::notZero, LSuccess);
2206 
2207       // Rare inopportune interleaving - race.
2208       // The successor vanished in the small window above.
2209       // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
2210       // We need to ensure progress and succession.
2211       // Try to reacquire the lock.
2212       // If that fails then the new owner is responsible for succession and this
2213       // thread needs to take no further action and can exit via the fast path (success).
2214       // If the re-acquire succeeds then pass control into the slow path.
2215       // As implemented, this latter mode is horrible because we generate more
2216       // coherence traffic on the lock *and* artificially extend the critical section
2217       // length by virtue of passing control into the slow path.
2218 
2219       // box is really RAX -- the following CMPXCHG depends on that binding
2220       // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
2221       movptr(boxReg, (int32_t)NULL_WORD);
2222       if (os::is_MP()) { lock(); }
2223       cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2224       // There's no successor so we tried to regrab the lock.
2225       // If that didn't work, then another thread grabbed the
2226       // lock so we're done (and exit was a success).
2227       jccb  (Assembler::notEqual, LSuccess);
2228       // Intentional fall-through into slow-path
2229 
2230       bind  (LGoSlowPath);
2231       orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
2232       jmpb  (DONE_LABEL);
2233 
2234       bind  (LSuccess);
2235       testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
2236       jmpb  (DONE_LABEL);
2237     }
2238 
2239     bind  (Stacked);
2240     movptr(tmpReg, Address(boxReg, 0));      // re-fetch
2241     if (os::is_MP()) { lock(); }
2242     cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2243 
2244     if (EmitSync & 65536) {
2245        bind (CheckSucc);
2246     }
2247 #endif
2248     bind(DONE_LABEL);
2249   }
2250 }
2251 #endif // COMPILER2
2252 
2253 void MacroAssembler::c2bool(Register x) {
2254   // implements x == 0 ? 0 : 1
2255   // note: must only look at least-significant byte of x
2256   //       since C-style booleans are stored in one byte
2257   //       only! (was bug)
2258   andl(x, 0xFF);
2259   setb(Assembler::notZero, x);
2260 }
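     // For illustration, the C equivalent of the two instructions above:
     //
     //   x = ((x & 0xFF) != 0) ? 1 : 0;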
2261 
2262 // Wouldn't need if AddressLiteral version had new name
2263 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2264   Assembler::call(L, rtype);
2265 }
2266 
2267 void MacroAssembler::call(Register entry) {
2268   Assembler::call(entry);
2269 }
2270 
2271 void MacroAssembler::call(AddressLiteral entry) {
2272   if (reachable(entry)) {
2273     Assembler::call_literal(entry.target(), entry.rspec());
2274   } else {
2275     lea(rscratch1, entry);
2276     Assembler::call(rscratch1);
2277   }
2278 }
2279 
2280 void MacroAssembler::ic_call(address entry, jint method_index) {
2281   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
2282   movptr(rax, (intptr_t)Universe::non_oop_word());
2283   call(AddressLiteral(entry, rh));
2284 }
2285 
2286 // Implementation of call_VM versions
2287 
2288 void MacroAssembler::call_VM(Register oop_result,
2289                              address entry_point,
2290                              bool check_exceptions) {
2291   Label C, E;
2292   call(C, relocInfo::none);
2293   jmp(E);
2294 
2295   bind(C);
2296   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
2297   ret(0);
2298 
2299   bind(E);
2300 }
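     // Shape of the code emitted above (a control-flow sketch, not a new API):
     //
     //   call C     // pushes the address of the jmp below as the return pc;
     //              // that slot doubles as last_Java_pc (see call_VM_helper)
     //   jmp  E     // skipped-over landing pad for the ret below
     // C: ...       // call_VM_helper performs the actual VM call
     //   ret 0      // pops the pushed pc, resuming at the jmp
     // E:           // execution continues here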
2301 
2302 void MacroAssembler::call_VM(Register oop_result,
2303                              address entry_point,
2304                              Register arg_1,
2305                              bool check_exceptions) {
2306   Label C, E;
2307   call(C, relocInfo::none);
2308   jmp(E);
2309 
2310   bind(C);
2311   pass_arg1(this, arg_1);
2312   call_VM_helper(oop_result, entry_point, 1, check_exceptions);
2313   ret(0);
2314 
2315   bind(E);
2316 }
2317 
2318 void MacroAssembler::call_VM(Register oop_result,
2319                              address entry_point,
2320                              Register arg_1,
2321                              Register arg_2,
2322                              bool check_exceptions) {
2323   Label C, E;
2324   call(C, relocInfo::none);
2325   jmp(E);
2326 
2327   bind(C);
2328 
2329   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2330 
2331   pass_arg2(this, arg_2);
2332   pass_arg1(this, arg_1);
2333   call_VM_helper(oop_result, entry_point, 2, check_exceptions);
2334   ret(0);
2335 
2336   bind(E);
2337 }
2338 
2339 void MacroAssembler::call_VM(Register oop_result,
2340                              address entry_point,
2341                              Register arg_1,
2342                              Register arg_2,
2343                              Register arg_3,
2344                              bool check_exceptions) {
2345   Label C, E;
2346   call(C, relocInfo::none);
2347   jmp(E);
2348 
2349   bind(C);
2350 
2351   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2352   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2353   pass_arg3(this, arg_3);
2354 
2355   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2356   pass_arg2(this, arg_2);
2357 
2358   pass_arg1(this, arg_1);
2359   call_VM_helper(oop_result, entry_point, 3, check_exceptions);
2360   ret(0);
2361 
2362   bind(E);
2363 }
2364 
2365 void MacroAssembler::call_VM(Register oop_result,
2366                              Register last_java_sp,
2367                              address entry_point,
2368                              int number_of_arguments,
2369                              bool check_exceptions) {
2370   Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2371   call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2372 }
2373 
2374 void MacroAssembler::call_VM(Register oop_result,
2375                              Register last_java_sp,
2376                              address entry_point,
2377                              Register arg_1,
2378                              bool check_exceptions) {
2379   pass_arg1(this, arg_1);
2380   call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2381 }
2382 
2383 void MacroAssembler::call_VM(Register oop_result,
2384                              Register last_java_sp,
2385                              address entry_point,
2386                              Register arg_1,
2387                              Register arg_2,
2388                              bool check_exceptions) {
2389 
2390   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2391   pass_arg2(this, arg_2);
2392   pass_arg1(this, arg_1);
2393   call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2394 }
2395 
2396 void MacroAssembler::call_VM(Register oop_result,
2397                              Register last_java_sp,
2398                              address entry_point,
2399                              Register arg_1,
2400                              Register arg_2,
2401                              Register arg_3,
2402                              bool check_exceptions) {
2403   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2404   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2405   pass_arg3(this, arg_3);
2406   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2407   pass_arg2(this, arg_2);
2408   pass_arg1(this, arg_1);
2409   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2410 }
2411 
2412 void MacroAssembler::super_call_VM(Register oop_result,
2413                                    Register last_java_sp,
2414                                    address entry_point,
2415                                    int number_of_arguments,
2416                                    bool check_exceptions) {
2417   Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2418   MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2419 }
2420 
2421 void MacroAssembler::super_call_VM(Register oop_result,
2422                                    Register last_java_sp,
2423                                    address entry_point,
2424                                    Register arg_1,
2425                                    bool check_exceptions) {
2426   pass_arg1(this, arg_1);
2427   super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2428 }
2429 
2430 void MacroAssembler::super_call_VM(Register oop_result,
2431                                    Register last_java_sp,
2432                                    address entry_point,
2433                                    Register arg_1,
2434                                    Register arg_2,
2435                                    bool check_exceptions) {
2436 
2437   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2438   pass_arg2(this, arg_2);
2439   pass_arg1(this, arg_1);
2440   super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2441 }
2442 
2443 void MacroAssembler::super_call_VM(Register oop_result,
2444                                    Register last_java_sp,
2445                                    address entry_point,
2446                                    Register arg_1,
2447                                    Register arg_2,
2448                                    Register arg_3,
2449                                    bool check_exceptions) {
2450   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2451   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2452   pass_arg3(this, arg_3);
2453   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2454   pass_arg2(this, arg_2);
2455   pass_arg1(this, arg_1);
2456   super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2457 }
2458 
2459 void MacroAssembler::call_VM_base(Register oop_result,
2460                                   Register java_thread,
2461                                   Register last_java_sp,
2462                                   address  entry_point,
2463                                   int      number_of_arguments,
2464                                   bool     check_exceptions) {
2465   // determine java_thread register
2466   if (!java_thread->is_valid()) {
2467 #ifdef _LP64
2468     java_thread = r15_thread;
2469 #else
2470     java_thread = rdi;
2471     get_thread(java_thread);
2472 #endif // LP64
2473   }
2474   // determine last_java_sp register
2475   if (!last_java_sp->is_valid()) {
2476     last_java_sp = rsp;
2477   }
2478   // debugging support
2479   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
2480   LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
2481 #ifdef ASSERT
2482   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
2483   // r12 is the heapbase.
2484   LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2485 #endif // ASSERT
2486 
2487   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
2488   assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2489 
2490   // push java thread (becomes first argument of C function)
2491 
2492   NOT_LP64(push(java_thread); number_of_arguments++);
2493   LP64_ONLY(mov(c_rarg0, r15_thread));
2494 
2495   // set last Java frame before call
2496   assert(last_java_sp != rbp, "can't use ebp/rbp");
2497 
2498   // Only interpreter should have to set fp
2499   set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2500 
2501   // do the call, remove parameters
2502   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2503 
2504   // restore the thread (cannot use the pushed argument since arguments
2505   // may be overwritten by C code generated by an optimizing compiler);
2506   // however, we can use the register value directly if it is callee saved.
2507   if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2508     // rdi & rsi (also r15) are callee saved -> nothing to do
2509 #ifdef ASSERT
2510     guarantee(java_thread != rax, "change this code");
2511     push(rax);
2512     { Label L;
2513       get_thread(rax);
2514       cmpptr(java_thread, rax);
2515       jcc(Assembler::equal, L);
2516       STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2517       bind(L);
2518     }
2519     pop(rax);
2520 #endif
2521   } else {
2522     get_thread(java_thread);
2523   }
2524   // reset last Java frame
2525   // Only interpreter should have to clear fp
2526   reset_last_Java_frame(java_thread, true, false);
2527 
2528 #ifndef CC_INTERP
2529    // C++ interp handles this in the interpreter
2530   check_and_handle_popframe(java_thread);
2531   check_and_handle_earlyret(java_thread);
2532 #endif /* CC_INTERP */
2533 
2534   if (check_exceptions) {
2535     // check for pending exceptions (java_thread is set upon return)
2536     cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2537 #ifndef _LP64
2538     jump_cc(Assembler::notEqual,
2539             RuntimeAddress(StubRoutines::forward_exception_entry()));
2540 #else
2541     // This used to jump conditionally to forward_exception; however, it is
2542     // possible, if we relocate, that the branch will not reach. So we must jump
2543     // around it so we can always reach the target.
2544 
2545     Label ok;
2546     jcc(Assembler::equal, ok);
2547     jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2548     bind(ok);
2549 #endif // LP64
2550   }
2551 
2552   // get oop result if there is one and reset the value in the thread
2553   if (oop_result->is_valid()) {
2554     get_vm_result(oop_result, java_thread);
2555   }
2556 }
2557 
2558 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2559 
2560   // Calculating the value for last_Java_sp is somewhat subtle.
2561   // call_VM does an intermediate call
2562   // which places a return address on the stack just under the
2563   // stack pointer as the user finished with it. This allows
2564   // us to retrieve last_Java_pc from last_Java_sp[-1].
2565   // On 32bit we then have to push additional args on the stack to accomplish
2566   // the actual requested call. On 64bit call_VM can only use register args
2567   // so the only extra space is the return address that call_VM created.
2568   // This hopefully explains the calculations here.
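       // Worked example (sketch): on 32-bit with number_of_arguments == 2 and
       // wordSize == 4, the stack holds two pushed args below the return pc
       // from call(C), so rax = rsp + (1 + 2) * 4 recovers the caller's sp
       // and rax[-4] (last_Java_sp[-1]) is the pc recorded as last_Java_pc.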
2569 
2570 #ifdef _LP64
2571   // We've pushed one address, correct last_Java_sp
2572   lea(rax, Address(rsp, wordSize));
2573 #else
2574   lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
2575 #endif // LP64
2576 
2577   call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
2578 
2579 }
2580 
2581 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2582   call_VM_leaf_base(entry_point, number_of_arguments);
2583 }
2584 
2585 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2586   pass_arg0(this, arg_0);
2587   call_VM_leaf(entry_point, 1);
2588 }
2589 
2590 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2591 
2592   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2593   pass_arg1(this, arg_1);
2594   pass_arg0(this, arg_0);
2595   call_VM_leaf(entry_point, 2);
2596 }
2597 
2598 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2599   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2600   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2601   pass_arg2(this, arg_2);
2602   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2603   pass_arg1(this, arg_1);
2604   pass_arg0(this, arg_0);
2605   call_VM_leaf(entry_point, 3);
2606 }
2607 
2608 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2609   pass_arg0(this, arg_0);
2610   MacroAssembler::call_VM_leaf_base(entry_point, 1);
2611 }
2612 
2613 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2614 
2615   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2616   pass_arg1(this, arg_1);
2617   pass_arg0(this, arg_0);
2618   MacroAssembler::call_VM_leaf_base(entry_point, 2);
2619 }
2620 
2621 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2622   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2623   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2624   pass_arg2(this, arg_2);
2625   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2626   pass_arg1(this, arg_1);
2627   pass_arg0(this, arg_0);
2628   MacroAssembler::call_VM_leaf_base(entry_point, 3);
2629 }
2630 
2631 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2632   LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
2633   LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2634   LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2635   pass_arg3(this, arg_3);
2636   LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2637   LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2638   pass_arg2(this, arg_2);
2639   LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2640   pass_arg1(this, arg_1);
2641   pass_arg0(this, arg_0);
2642   MacroAssembler::call_VM_leaf_base(entry_point, 4);
2643 }
2644 
2645 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
2646   movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
2647   movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
2648   verify_oop(oop_result, "broken oop in call_VM_base");
2649 }
2650 
2651 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
2652   movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
2653   movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
2654 }
2655 
2656 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2657 }
2658 
2659 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2660 }
2661 
2662 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2663   if (reachable(src1)) {
2664     cmpl(as_Address(src1), imm);
2665   } else {
2666     lea(rscratch1, src1);
2667     cmpl(Address(rscratch1, 0), imm);
2668   }
2669 }
2670 
2671 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2672   assert(!src2.is_lval(), "use cmpptr");
2673   if (reachable(src2)) {
2674     cmpl(src1, as_Address(src2));
2675   } else {
2676     lea(rscratch1, src2);
2677     cmpl(src1, Address(rscratch1, 0));
2678   }
2679 }
2680 
2681 void MacroAssembler::cmp32(Register src1, int32_t imm) {
2682   Assembler::cmpl(src1, imm);
2683 }
2684 
2685 void MacroAssembler::cmp32(Register src1, Address src2) {
2686   Assembler::cmpl(src1, src2);
2687 }
2688 
2689 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2690   ucomisd(opr1, opr2);
2691 
2692   Label L;
2693   if (unordered_is_less) {
2694     movl(dst, -1);
2695     jcc(Assembler::parity, L);
2696     jcc(Assembler::below , L);
2697     movl(dst, 0);
2698     jcc(Assembler::equal , L);
2699     increment(dst);
2700   } else { // unordered is greater
2701     movl(dst, 1);
2702     jcc(Assembler::parity, L);
2703     jcc(Assembler::above , L);
2704     movl(dst, 0);
2705     jcc(Assembler::equal , L);
2706     decrementl(dst);
2707   }
2708   bind(L);
2709 }
2710 
2711 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2712   ucomiss(opr1, opr2);
2713 
2714   Label L;
2715   if (unordered_is_less) {
2716     movl(dst, -1);
2717     jcc(Assembler::parity, L);
2718     jcc(Assembler::below , L);
2719     movl(dst, 0);
2720     jcc(Assembler::equal , L);
2721     increment(dst);
2722   } else { // unordered is greater
2723     movl(dst, 1);
2724     jcc(Assembler::parity, L);
2725     jcc(Assembler::above , L);
2726     movl(dst, 0);
2727     jcc(Assembler::equal , L);
2728     decrementl(dst);
2729   }
2730   bind(L);
2731 }
2732 
2733 
2734 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
2735   if (reachable(src1)) {
2736     cmpb(as_Address(src1), imm);
2737   } else {
2738     lea(rscratch1, src1);
2739     cmpb(Address(rscratch1, 0), imm);
2740   }
2741 }
2742 
2743 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2744 #ifdef _LP64
2745   if (src2.is_lval()) {
2746     movptr(rscratch1, src2);
2747     Assembler::cmpq(src1, rscratch1);
2748   } else if (reachable(src2)) {
2749     cmpq(src1, as_Address(src2));
2750   } else {
2751     lea(rscratch1, src2);
2752     Assembler::cmpq(src1, Address(rscratch1, 0));
2753   }
2754 #else
2755   if (src2.is_lval()) {
2756     cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2757   } else {
2758     cmpl(src1, as_Address(src2));
2759   }
2760 #endif // _LP64
2761 }
2762 
2763 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2764   assert(src2.is_lval(), "not a mem-mem compare");
2765 #ifdef _LP64
2766   // materialize src2's literal address in rscratch1 for the mem-reg compare
2767   movptr(rscratch1, src2);
2768   Assembler::cmpq(src1, rscratch1);
2769 #else
2770   cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2771 #endif // _LP64
2772 }
2773 
2774 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
2775   if (reachable(adr)) {
2776     if (os::is_MP())
2777       lock();
2778     cmpxchgptr(reg, as_Address(adr));
2779   } else {
2780     lea(rscratch1, adr);
2781     if (os::is_MP())
2782       lock();
2783     cmpxchgptr(reg, Address(rscratch1, 0));
2784   }
2785 }
2786 
2787 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
2788   LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
2789 }
2790 
2791 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2792   if (reachable(src)) {
2793     Assembler::comisd(dst, as_Address(src));
2794   } else {
2795     lea(rscratch1, src);
2796     Assembler::comisd(dst, Address(rscratch1, 0));
2797   }
2798 }
2799 
2800 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2801   if (reachable(src)) {
2802     Assembler::comiss(dst, as_Address(src));
2803   } else {
2804     lea(rscratch1, src);
2805     Assembler::comiss(dst, Address(rscratch1, 0));
2806   }
2807 }
2808 
2809 
2810 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
2811   Condition negated_cond = negate_condition(cond);
2812   Label L;
2813   jcc(negated_cond, L);
2814   pushf(); // Preserve flags
2815   atomic_incl(counter_addr);
2816   popf();
2817   bind(L);
2818 }
2819 
2820 int MacroAssembler::corrected_idivl(Register reg) {
2821   // Full implementation of Java idiv and irem; checks for
2822   // special case as described in JVM spec., p.243 & p.271.
2823   // The function returns the (pc) offset of the idivl
2824   // instruction - may be needed for implicit exceptions.
2825   //
2826   //         normal case                           special case
2827   //
2828   // input : rax: dividend                          min_int
2829   //         reg: divisor   (may not be rax or rdx) -1
2830   //
2831   // output: rax: quotient  (= rax idiv reg)        min_int
2832   //         rdx: remainder (= rax irem reg)        0
2833   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
2834   const int min_int = 0x80000000;
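  // The special case matters because x86 idiv raises #DE on min_int / -1
  // (the true quotient, +2^31, is unrepresentable in 32 bits), whereas Java
  // defines the result as min_int with remainder 0.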
2835   Label normal_case, special_case;
2836 
2837   // check for special case
2838   cmpl(rax, min_int);
2839   jcc(Assembler::notEqual, normal_case);
2840   xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
2841   cmpl(reg, -1);
2842   jcc(Assembler::equal, special_case);
2843 
2844   // handle normal case
2845   bind(normal_case);
2846   cdql();
2847   int idivl_offset = offset();
2848   idivl(reg);
2849 
2850   // normal and special case exit
2851   bind(special_case);
2852 
2853   return idivl_offset;
2854 }
2855 
2856 
2857 
2858 void MacroAssembler::decrementl(Register reg, int value) {
2859   if (value == min_jint) {subl(reg, value) ; return; }
2860   if (value <  0) { incrementl(reg, -value); return; }
2861   if (value == 0) {                        ; return; }
2862   if (value == 1 && UseIncDec) { decl(reg) ; return; }
2863   /* else */      { subl(reg, value)       ; return; }
2864 }
2865 
2866 void MacroAssembler::decrementl(Address dst, int value) {
2867   if (value == min_jint) {subl(dst, value) ; return; }
2868   if (value <  0) { incrementl(dst, -value); return; }
2869   if (value == 0) {                        ; return; }
2870   if (value == 1 && UseIncDec) { decl(dst) ; return; }
2871   /* else */      { subl(dst, value)       ; return; }
2872 }
2873 
2874 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2875   assert (shift_value > 0, "illegal shift value");
2876   Label _is_positive;
2877   testl (reg, reg);
2878   jcc (Assembler::positive, _is_positive);
2879   int offset = (1 << shift_value) - 1 ;
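  // Bias negative dividends so the arithmetic shift truncates toward zero
  // like Java division. Worked example for shift_value = 2 (divide by 4):
  //   reg = -7:  -7 + 3 = -4,  -4 >> 2 = -1   (plain -7 >> 2 would give -2)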
2880 
2881   if (offset == 1) {
2882     incrementl(reg);
2883   } else {
2884     addl(reg, offset);
2885   }
2886 
2887   bind (_is_positive);
2888   sarl(reg, shift_value);
2889 }
2890 
2891 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2892   if (reachable(src)) {
2893     Assembler::divsd(dst, as_Address(src));
2894   } else {
2895     lea(rscratch1, src);
2896     Assembler::divsd(dst, Address(rscratch1, 0));
2897   }
2898 }
2899 
2900 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2901   if (reachable(src)) {
2902     Assembler::divss(dst, as_Address(src));
2903   } else {
2904     lea(rscratch1, src);
2905     Assembler::divss(dst, Address(rscratch1, 0));
2906   }
2907 }
2908 
2909 // !defined(COMPILER2) is because of stupid core builds
2910 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI
2911 void MacroAssembler::empty_FPU_stack() {
2912   if (VM_Version::supports_mmx()) {
2913     emms();
2914   } else {
2915     for (int i = 8; i-- > 0; ) ffree(i);
2916   }
2917 }
2918 #endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI
2919 
2920 
2921 // Defines obj, preserves var_size_in_bytes
2922 void MacroAssembler::eden_allocate(Register obj,
2923                                    Register var_size_in_bytes,
2924                                    int con_size_in_bytes,
2925                                    Register t1,
2926                                    Label& slow_case) {
2927   assert(obj == rax, "obj must be in rax, for cmpxchg");
2928   assert_different_registers(obj, var_size_in_bytes, t1);
2929   if (!Universe::heap()->supports_inline_contig_alloc()) {
2930     jmp(slow_case);
2931   } else {
2932     Register end = t1;
2933     Label retry;
2934     bind(retry);
2935     ExternalAddress heap_top((address) Universe::heap()->top_addr());
2936     movptr(obj, heap_top);
2937     if (var_size_in_bytes == noreg) {
2938       lea(end, Address(obj, con_size_in_bytes));
2939     } else {
2940       lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2941     }
2942     // if end < obj then we wrapped around => object too long => slow case
2943     cmpptr(end, obj);
2944     jcc(Assembler::below, slow_case);
2945     cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2946     jcc(Assembler::above, slow_case);
2947     // Compare obj with the current top; if they are still equal, store end
2948     // (the new top) at the top address. cmpxchg sets ZF on success and
2949     // clears it otherwise. The lock prefix makes the exchange atomic on MPs.
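    // In C-like pseudocode the loop is the classic pointer-bump allocator
    // (a sketch, not the exact generated code):
    //   do {
    //     obj = *top;  end = obj + size;
    //     if (end < obj || end > heap_end) goto slow_case;
    //   } while (CAS(top, obj, end) != obj);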
2950     locked_cmpxchgptr(end, heap_top);
2951     jcc(Assembler::notEqual, retry);
2952   }
2953 }
2954 
2955 void MacroAssembler::enter() {
2956   push(rbp);
2957   mov(rbp, rsp);
2958 }
2959 
2960 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2961 void MacroAssembler::fat_nop() {
2962   if (UseAddressNop) {
2963     addr_nop_5();
2964   } else {
2965     emit_int8(0x26); // es:
2966     emit_int8(0x2e); // cs:
2967     emit_int8(0x64); // fs:
2968     emit_int8(0x65); // gs:
2969     emit_int8((unsigned char)0x90);
2970   }
2971 }
2972 
2973 void MacroAssembler::fcmp(Register tmp) {
2974   fcmp(tmp, 1, true, true);
2975 }
2976 
2977 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2978   assert(!pop_right || pop_left, "usage error");
2979   if (VM_Version::supports_cmov()) {
2980     assert(tmp == noreg, "unneeded temp");
2981     if (pop_left) {
2982       fucomip(index);
2983     } else {
2984       fucomi(index);
2985     }
2986     if (pop_right) {
2987       fpop();
2988     }
2989   } else {
2990     assert(tmp != noreg, "need temp");
2991     if (pop_left) {
2992       if (pop_right) {
2993         fcompp();
2994       } else {
2995         fcomp(index);
2996       }
2997     } else {
2998       fcom(index);
2999     }
3000     // convert FPU condition into eflags condition via rax
3001     save_rax(tmp);
3002     fwait(); fnstsw_ax();
3003     sahf();
3004     restore_rax(tmp);
3005   }
3006   // condition codes set as follows:
3007   //
3008   // CF (corresponds to C0) if x < y
3009   // PF (corresponds to C2) if unordered
3010   // ZF (corresponds to C3) if x = y
3011 }
3012 
3013 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
3014   fcmp2int(dst, unordered_is_less, 1, true, true);
3015 }
3016 
3017 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
3018   fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
3019   Label L;
3020   if (unordered_is_less) {
3021     movl(dst, -1);
3022     jcc(Assembler::parity, L);
3023     jcc(Assembler::below , L);
3024     movl(dst, 0);
3025     jcc(Assembler::equal , L);
3026     increment(dst);
3027   } else { // unordered is greater
3028     movl(dst, 1);
3029     jcc(Assembler::parity, L);
3030     jcc(Assembler::above , L);
3031     movl(dst, 0);
3032     jcc(Assembler::equal , L);
3033     decrementl(dst);
3034   }
3035   bind(L);
3036 }
3037 
3038 void MacroAssembler::fld_d(AddressLiteral src) {
3039   fld_d(as_Address(src));
3040 }
3041 
3042 void MacroAssembler::fld_s(AddressLiteral src) {
3043   fld_s(as_Address(src));
3044 }
3045 
3046 void MacroAssembler::fld_x(AddressLiteral src) {
3047   Assembler::fld_x(as_Address(src));
3048 }
3049 
3050 void MacroAssembler::fldcw(AddressLiteral src) {
3051   Assembler::fldcw(as_Address(src));
3052 }
3053 
3054 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src) {
3055   if (reachable(src)) {
3056     Assembler::mulpd(dst, as_Address(src));
3057   } else {
3058     lea(rscratch1, src);
3059     Assembler::mulpd(dst, Address(rscratch1, 0));
3060   }
3061 }
3062 
3063 void MacroAssembler::increase_precision() {
3064   subptr(rsp, BytesPerWord);
3065   fnstcw(Address(rsp, 0));
3066   movl(rax, Address(rsp, 0));
3067   orl(rax, 0x300);
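  // 0x300 sets the precision-control field (bits 8-9 of the x87 control
  // word) to 11b = 64-bit extended precision; the unmodified control word
  // stays at [rsp] for restore_precision() to reload.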
3068   push(rax);
3069   fldcw(Address(rsp, 0));
3070   pop(rax);
3071 }
3072 
3073 void MacroAssembler::restore_precision() {
3074   fldcw(Address(rsp, 0));
3075   addptr(rsp, BytesPerWord);
3076 }
3077 
3078 void MacroAssembler::fpop() {
3079   ffree();
3080   fincstp();
3081 }
3082 
3083 void MacroAssembler::load_float(Address src) {
3084   if (UseSSE >= 1) {
3085     movflt(xmm0, src);
3086   } else {
3087     LP64_ONLY(ShouldNotReachHere());
3088     NOT_LP64(fld_s(src));
3089   }
3090 }
3091 
3092 void MacroAssembler::store_float(Address dst) {
3093   if (UseSSE >= 1) {
3094     movflt(dst, xmm0);
3095   } else {
3096     LP64_ONLY(ShouldNotReachHere());
3097     NOT_LP64(fstp_s(dst));
3098   }
3099 }
3100 
3101 void MacroAssembler::load_double(Address src) {
3102   if (UseSSE >= 2) {
3103     movdbl(xmm0, src);
3104   } else {
3105     LP64_ONLY(ShouldNotReachHere());
3106     NOT_LP64(fld_d(src));
3107   }
3108 }
3109 
3110 void MacroAssembler::store_double(Address dst) {
3111   if (UseSSE >= 2) {
3112     movdbl(dst, xmm0);
3113   } else {
3114     LP64_ONLY(ShouldNotReachHere());
3115     NOT_LP64(fstp_d(dst));
3116   }
3117 }
3118 
3119 void MacroAssembler::fremr(Register tmp) {
3120   save_rax(tmp);
3121   { Label L;
3122     bind(L);
3123     fprem();
3124     fwait(); fnstsw_ax();
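    // fprem reduces the exponent difference by at most 63 per iteration and
    // sets C2 (bit 10, mask 0x400, of the status word now in ax) while the
    // reduction is incomplete, so loop until C2 clears. On 32-bit, sahf
    // maps C2 to the parity flag.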
3125 #ifdef _LP64
3126     testl(rax, 0x400);
3127     jcc(Assembler::notEqual, L);
3128 #else
3129     sahf();
3130     jcc(Assembler::parity, L);
3131 #endif // _LP64
3132   }
3133   restore_rax(tmp);
3134   // Result is in ST0.
3135   // Note: fxch & fpop to get rid of ST1
3136   // (otherwise FPU stack could overflow eventually)
3137   fxch(1);
3138   fpop();
3139 }
3140 
3141 
3142 void MacroAssembler::incrementl(AddressLiteral dst) {
3143   if (reachable(dst)) {
3144     incrementl(as_Address(dst));
3145   } else {
3146     lea(rscratch1, dst);
3147     incrementl(Address(rscratch1, 0));
3148   }
3149 }
3150 
3151 void MacroAssembler::incrementl(ArrayAddress dst) {
3152   incrementl(as_Address(dst));
3153 }
3154 
3155 void MacroAssembler::incrementl(Register reg, int value) {
3156   if (value == min_jint) {addl(reg, value) ; return; }
3157   if (value <  0) { decrementl(reg, -value); return; }
3158   if (value == 0) {                        ; return; }
3159   if (value == 1 && UseIncDec) { incl(reg) ; return; }
3160   /* else */      { addl(reg, value)       ; return; }
3161 }
3162 
3163 void MacroAssembler::incrementl(Address dst, int value) {
3164   if (value == min_jint) {addl(dst, value) ; return; }
3165   if (value <  0) { decrementl(dst, -value); return; }
3166   if (value == 0) {                        ; return; }
3167   if (value == 1 && UseIncDec) { incl(dst) ; return; }
3168   /* else */      { addl(dst, value)       ; return; }
3169 }
3170 
3171 void MacroAssembler::jump(AddressLiteral dst) {
3172   if (reachable(dst)) {
3173     jmp_literal(dst.target(), dst.rspec());
3174   } else {
3175     lea(rscratch1, dst);
3176     jmp(rscratch1);
3177   }
3178 }
3179 
3180 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
3181   if (reachable(dst)) {
3182     InstructionMark im(this);
3183     relocate(dst.reloc());
3184     const int short_size = 2;
3185     const int long_size = 6;
3186     int offs = (intptr_t)dst.target() - ((intptr_t)pc());
3187     if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
3188       // 0111 tttn #8-bit disp
3189       emit_int8(0x70 | cc);
3190       emit_int8((offs - short_size) & 0xFF);
3191     } else {
3192       // 0000 1111 1000 tttn #32-bit disp
3193       emit_int8(0x0F);
3194       emit_int8((unsigned char)(0x80 | cc));
3195       emit_int32(offs - long_size);
3196     }
3197   } else {
3198 #ifdef ASSERT
3199     warning("reversing conditional branch");
3200 #endif /* ASSERT */
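    // Unreachable target: invert the condition and hop over an indirect
    // jump, i.e. emit
    //   j<!cc>  skip
    //   lea     rscratch1, dst
    //   jmp     rscratch1
    // skip: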
3201     Label skip;
3202     jccb(reverse[cc], skip);
3203     lea(rscratch1, dst);
3204     Assembler::jmp(rscratch1);
3205     bind(skip);
3206   }
3207 }
3208 
3209 void MacroAssembler::ldmxcsr(AddressLiteral src) {
3210   if (reachable(src)) {
3211     Assembler::ldmxcsr(as_Address(src));
3212   } else {
3213     lea(rscratch1, src);
3214     Assembler::ldmxcsr(Address(rscratch1, 0));
3215   }
3216 }
3217 
3218 int MacroAssembler::load_signed_byte(Register dst, Address src) {
3219   int off;
3220   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3221     off = offset();
3222     movsbl(dst, src); // movsxb
3223   } else {
3224     off = load_unsigned_byte(dst, src);
3225     shll(dst, 24);
3226     sarl(dst, 24);
3227   }
3228   return off;
3229 }
3230 
3231 // Note: load_signed_short used to be called load_signed_word.
3232 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3233 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3234 // The term "word" in HotSpot means a 32- or 64-bit machine word.
3235 int MacroAssembler::load_signed_short(Register dst, Address src) {
3236   int off;
3237   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3238     // Sign-extending 16 => 64 bits directly would seem safe here, but the
3239     // 64-bit port has always used the 32-bit form, which implies callers
3240     // only rely on the low 32 bits.
3241     off = offset();
3242     movswl(dst, src); // movsxw
3243   } else {
3244     off = load_unsigned_short(dst, src);
3245     shll(dst, 16);
3246     sarl(dst, 16);
3247   }
3248   return off;
3249 }
3250 
3251 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3252   // Zero-extending moves avoid partial-register stalls (see Intel Doc.
3253   // AP-526, "Zero-Extension of Short", p. 16, and "3.9 Partial Register Penalties", p. 22).
3254   int off;
3255   if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3256     off = offset();
3257     movzbl(dst, src); // movzxb
3258   } else {
3259     xorl(dst, dst);
3260     off = offset();
3261     movb(dst, src);
3262   }
3263   return off;
3264 }
3265 
3266 // Note: load_unsigned_short used to be called load_unsigned_word.
3267 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3268   // Zero-extending moves avoid partial-register stalls (see Intel Doc.
3269   // AP-526, "Zero-Extension of Short", p. 16, and "3.9 Partial Register Penalties", p. 22).
3270   int off;
3271   if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3272     off = offset();
3273     movzwl(dst, src); // movzxw
3274   } else {
3275     xorl(dst, dst);
3276     off = offset();
3277     movw(dst, src);
3278   }
3279   return off;
3280 }
3281 
3282 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3283   switch (size_in_bytes) {
3284 #ifndef _LP64
3285   case  8:
3286     assert(dst2 != noreg, "second dest register required");
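    // 32-bit: split the 8-byte load into two 4-byte halves; dst receives the
    // low word at src and dst2 the high word at src + 4 (little-endian).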
3287     movl(dst,  src);
3288     movl(dst2, src.plus_disp(BytesPerInt));
3289     break;
3290 #else
3291   case  8:  movq(dst, src); break;
3292 #endif
3293   case  4:  movl(dst, src); break;
3294   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3295   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
3296   default:  ShouldNotReachHere();
3297   }
3298 }
3299 
3300 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
3301   switch (size_in_bytes) {
3302 #ifndef _LP64
3303   case  8:
3304     assert(src2 != noreg, "second source register required");
3305     movl(dst,                        src);
3306     movl(dst.plus_disp(BytesPerInt), src2);
3307     break;
3308 #else
3309   case  8:  movq(dst, src); break;
3310 #endif
3311   case  4:  movl(dst, src); break;
3312   case  2:  movw(dst, src); break;
3313   case  1:  movb(dst, src); break;
3314   default:  ShouldNotReachHere();
3315   }
3316 }
3317 
3318 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3319   if (reachable(dst)) {
3320     movl(as_Address(dst), src);
3321   } else {
3322     lea(rscratch1, dst);
3323     movl(Address(rscratch1, 0), src);
3324   }
3325 }
3326 
3327 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3328   if (reachable(src)) {
3329     movl(dst, as_Address(src));
3330   } else {
3331     lea(rscratch1, src);
3332     movl(dst, Address(rscratch1, 0));
3333   }
3334 }
3335 
3336 // C++ bool manipulation
3337 
3338 void MacroAssembler::movbool(Register dst, Address src) {
3339   if (sizeof(bool) == 1)
3340     movb(dst, src);
3341   else if (sizeof(bool) == 2)
3342     movw(dst, src);
3343   else if (sizeof(bool) == 4)
3344     movl(dst, src);
3345   else
3346     // unsupported
3347     ShouldNotReachHere();
3348 }
3349 
3350 void MacroAssembler::movbool(Address dst, bool boolconst) {
3351   if (sizeof(bool) == 1)
3352     movb(dst, (int) boolconst);
3353   else if (sizeof(bool) == 2)
3354     movw(dst, (int) boolconst);
3355   else if (sizeof(bool) == 4)
3356     movl(dst, (int) boolconst);
3357   else
3358     // unsupported
3359     ShouldNotReachHere();
3360 }
3361 
3362 void MacroAssembler::movbool(Address dst, Register src) {
3363   if (sizeof(bool) == 1)
3364     movb(dst, src);
3365   else if (sizeof(bool) == 2)
3366     movw(dst, src);
3367   else if (sizeof(bool) == 4)
3368     movl(dst, src);
3369   else
3370     // unsupported
3371     ShouldNotReachHere();
3372 }
3373 
3374 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
3375   movb(as_Address(dst), src);
3376 }
3377 
3378 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
3379   if (reachable(src)) {
3380     movdl(dst, as_Address(src));
3381   } else {
3382     lea(rscratch1, src);
3383     movdl(dst, Address(rscratch1, 0));
3384   }
3385 }
3386 
3387 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
3388   if (reachable(src)) {
3389     movq(dst, as_Address(src));
3390   } else {
3391     lea(rscratch1, src);
3392     movq(dst, Address(rscratch1, 0));
3393   }
3394 }
3395 
3396 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3397   if (reachable(src)) {
3398     if (UseXmmLoadAndClearUpper) {
3399       movsd (dst, as_Address(src));
3400     } else {
3401       movlpd(dst, as_Address(src));
3402     }
3403   } else {
3404     lea(rscratch1, src);
3405     if (UseXmmLoadAndClearUpper) {
3406       movsd (dst, Address(rscratch1, 0));
3407     } else {
3408       movlpd(dst, Address(rscratch1, 0));
3409     }
3410   }
3411 }
3412 
3413 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3414   if (reachable(src)) {
3415     movss(dst, as_Address(src));
3416   } else {
3417     lea(rscratch1, src);
3418     movss(dst, Address(rscratch1, 0));
3419   }
3420 }
3421 
3422 void MacroAssembler::movptr(Register dst, Register src) {
3423   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3424 }
3425 
3426 void MacroAssembler::movptr(Register dst, Address src) {
3427   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3428 }
3429 
3430 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
3431 void MacroAssembler::movptr(Register dst, intptr_t src) {
3432   LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
3433 }
3434 
3435 void MacroAssembler::movptr(Address dst, Register src) {
3436   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3437 }
3438 
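// Without AVX512VL, the legacy-encoded SSE/AVX moves below cannot name
// xmm16-xmm31, so for such registers the 128-bit (or 256-bit) low lane is
// transferred with an EVEX-encoded extract/insert instead.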
3439 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
3440   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
3441     Assembler::vextractf32x4h(dst, src, 0);
3442   } else {
3443     Assembler::movdqu(dst, src);
3444   }
3445 }
3446 
3447 void MacroAssembler::movdqu(XMMRegister dst, Address src) {
3448   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
3449     Assembler::vinsertf32x4h(dst, src, 0);
3450   } else {
3451     Assembler::movdqu(dst, src);
3452   }
3453 }
3454 
3455 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
3456   if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
3457     Assembler::evmovdqul(dst, src, Assembler::AVX_512bit);
3458   } else {
3459     Assembler::movdqu(dst, src);
3460   }
3461 }
3462 
3463 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
3464   if (reachable(src)) {
3465     movdqu(dst, as_Address(src));
3466   } else {
3467     lea(rscratch1, src);
3468     movdqu(dst, Address(rscratch1, 0));
3469   }
3470 }
3471 
3472 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
3473   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
3474     Assembler::vextractf64x4h(dst, src, 0);
3475   } else {
3476     Assembler::vmovdqu(dst, src);
3477   }
3478 }
3479 
3480 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
3481   if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
3482     Assembler::vinsertf64x4h(dst, src, 0);
3483   } else {
3484     Assembler::vmovdqu(dst, src);
3485   }
3486 }
3487 
3488 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
3489   if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
3490     Assembler::evmovdqul(dst, src, Assembler::AVX_512bit);
3491   } else {
3493     Assembler::vmovdqu(dst, src);
3494   }
3495 }
3496 
3497 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src) {
3498   if (reachable(src)) {
3499     vmovdqu(dst, as_Address(src));
3500   } else {
3502     lea(rscratch1, src);
3503     vmovdqu(dst, Address(rscratch1, 0));
3504   }
3505 }
3506 
3507 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
3508   if (reachable(src)) {
3509     Assembler::movdqa(dst, as_Address(src));
3510   } else {
3511     lea(rscratch1, src);
3512     Assembler::movdqa(dst, Address(rscratch1, 0));
3513   }
3514 }
3515 
3516 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
3517   if (reachable(src)) {
3518     Assembler::movsd(dst, as_Address(src));
3519   } else {
3520     lea(rscratch1, src);
3521     Assembler::movsd(dst, Address(rscratch1, 0));
3522   }
3523 }
3524 
3525 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3526   if (reachable(src)) {
3527     Assembler::movss(dst, as_Address(src));
3528   } else {
3529     lea(rscratch1, src);
3530     Assembler::movss(dst, Address(rscratch1, 0));
3531   }
3532 }
3533 
3534 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
3535   if (reachable(src)) {
3536     Assembler::mulsd(dst, as_Address(src));
3537   } else {
3538     lea(rscratch1, src);
3539     Assembler::mulsd(dst, Address(rscratch1, 0));
3540   }
3541 }
3542 
3543 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
3544   if (reachable(src)) {
3545     Assembler::mulss(dst, as_Address(src));
3546   } else {
3547     lea(rscratch1, src);
3548     Assembler::mulss(dst, Address(rscratch1, 0));
3549   }
3550 }
3551 
3552 void MacroAssembler::null_check(Register reg, int offset) {
3553   if (needs_explicit_null_check(offset)) {
3554     // provoke OS NULL exception if reg = NULL by
3555     // accessing M[reg] w/o changing any (non-CC) registers
3556     // NOTE: cmpl is plenty here to provoke a segv
3557     cmpptr(rax, Address(reg, 0));
3558     // Note: should probably use testl(rax, Address(reg, 0));
3559     //       may be shorter code (however, this version of
3560     //       testl needs to be implemented first)
3561   } else {
3562     // nothing to do, (later) access of M[reg + offset]
3563     // will provoke OS NULL exception if reg = NULL
3564   }
3565 }
3566 
3567 void MacroAssembler::os_breakpoint() {
3568   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3569   // (e.g., MSVC can't call ps() otherwise)
3570   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3571 }
3572 
3573 #ifdef _LP64
3574 #define XSTATE_BV 0x200
3575 #endif
3576 
3577 void MacroAssembler::pop_CPU_state() {
3578   pop_FPU_state();
3579   pop_IU_state();
3580 }
3581 
3582 void MacroAssembler::pop_FPU_state() {
3583 #ifndef _LP64
3584   frstor(Address(rsp, 0));
3585 #else
3586   fxrstor(Address(rsp, 0));
3587 #endif
3588   addptr(rsp, FPUStateSizeInWords * wordSize);
3589 }
3590 
3591 void MacroAssembler::pop_IU_state() {
3592   popa();
3593   LP64_ONLY(addq(rsp, 8));
3594   popf();
3595 }
3596 
3597 // Save Integer and Float state
3598 // Warning: Stack must be 16 byte aligned (64bit)
3599 void MacroAssembler::push_CPU_state() {
3600   push_IU_state();
3601   push_FPU_state();
3602 }
3603 
3604 void MacroAssembler::push_FPU_state() {
3605   subptr(rsp, FPUStateSizeInWords * wordSize);
3606 #ifndef _LP64
3607   fnsave(Address(rsp, 0));
3608   fwait();
3609 #else
3610   fxsave(Address(rsp, 0));
3611 #endif // LP64
3612 }
3613 
3614 void MacroAssembler::push_IU_state() {
3615   // Push flags first because pusha kills them
3616   pushf();
3617   // Make sure rsp stays 16-byte aligned
3618   LP64_ONLY(subq(rsp, 8));
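  // On 64-bit: pushf pushed 8 bytes, so pad another 8 to keep rsp 16-byte
  // aligned before pusha pushes 16 x 8 = 128 bytes; push_FPU_state's fxsave
  // then sees the 16-byte-aligned operand it requires.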
3619   pusha();
3620 }
3621 
3622 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
3623   // determine java_thread register
3624   if (!java_thread->is_valid()) {
3625     java_thread = rdi;
3626     get_thread(java_thread);
3627   }
3628   // we must set sp to zero to clear frame
3629   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
3630   if (clear_fp) {
3631     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
3632   }
3633 
3634   if (clear_pc) {
3635     movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
3636   }
3637 }
3638 
3639 void MacroAssembler::restore_rax(Register tmp) {
3640   if (tmp == noreg) pop(rax);
3641   else if (tmp != rax) mov(rax, tmp);
3642 }
3643 
3644 void MacroAssembler::round_to(Register reg, int modulus) {
3645   addptr(reg, modulus - 1);
3646   andptr(reg, -modulus);
3647 }
3648 
3649 void MacroAssembler::save_rax(Register tmp) {
3650   if (tmp == noreg) push(rax);
3651   else if (tmp != rax) mov(tmp, rax);
3652 }
3653 
3654 // Write serialization page so VM thread can do a pseudo remote membar.
3655 // We use the current thread pointer to calculate a thread specific
3656 // offset to write to within the page. This minimizes bus traffic
3657 // due to cache line collision.
3658 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
3659   movl(tmp, thread);
3660   shrl(tmp, os::get_serialize_page_shift_count());
3661   andl(tmp, (os::vm_page_size() - sizeof(int)));
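  // Example (assuming a 4K page and a shift count of 4): a thread pointer
  // ending in 0x5a30 gives tmp = ...0x5a3, masked with 0xffc to the 4-byte-
  // aligned offset 0x5a0, so distinct threads tend to write distinct cache
  // lines within the page.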
3662 
3663   Address index(noreg, tmp, Address::times_1);
3664   ExternalAddress page(os::get_memory_serialize_page());
3665 
3666   // Size of store must match masking code above
3667   movl(as_Address(ArrayAddress(page, index)), tmp);
3668 }
3669 
3670 // Calls to C land
3671 //
3672 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
3673 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3674 // has to be reset to 0. This is required to allow proper stack traversal.
3675 void MacroAssembler::set_last_Java_frame(Register java_thread,
3676                                          Register last_java_sp,
3677                                          Register last_java_fp,
3678                                          address  last_java_pc) {
3679   // determine java_thread register
3680   if (!java_thread->is_valid()) {
3681     java_thread = rdi;
3682     get_thread(java_thread);
3683   }
3684   // determine last_java_sp register
3685   if (!last_java_sp->is_valid()) {
3686     last_java_sp = rsp;
3687   }
3688 
3689   // last_java_fp is optional
3690 
3691   if (last_java_fp->is_valid()) {
3692     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3693   }
3694 
3695   // last_java_pc is optional
3696 
3697   if (last_java_pc != NULL) {
3698     lea(Address(java_thread,
3699                  JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3700         InternalAddress(last_java_pc));
3701 
3702   }
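  // last_java_sp is stored last: the anchor only counts as set once sp is
  // non-zero, so fp and pc must already be in place when it is published.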
3703   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3704 }
3705 
3706 void MacroAssembler::shlptr(Register dst, int imm8) {
3707   LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
3708 }
3709 
3710 void MacroAssembler::shrptr(Register dst, int imm8) {
3711   LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
3712 }
3713 
3714 void MacroAssembler::sign_extend_byte(Register reg) {
3715   if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
3716     movsbl(reg, reg); // movsxb
3717   } else {
3718     shll(reg, 24);
3719     sarl(reg, 24);
3720   }
3721 }
3722 
3723 void MacroAssembler::sign_extend_short(Register reg) {
3724   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3725     movswl(reg, reg); // movsxw
3726   } else {
3727     shll(reg, 16);
3728     sarl(reg, 16);
3729   }
3730 }
3731 
3732 void MacroAssembler::testl(Register dst, AddressLiteral src) {
3733   assert(reachable(src), "Address should be reachable");
3734   testl(dst, as_Address(src));
3735 }
3736 
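// The wrappers below adapt legacy-encoded SSE instructions (pcmpeqb, ptest,
// pcmpestri, ...) to AVX-512 targets that lack AVX512VL/BW: such encodings
// can only name xmm0-xmm15, so operands living in xmm16-xmm31 are routed
// through xmm0/xmm1, which are first spilled to a 64-byte stack slot and
// restored afterwards. Worst case (both operands in the upper bank):
//   spill xmm0, xmm1;  xmm0 = src;  xmm1 = dst;
//   op xmm1, xmm0;  dst = xmm1;  restore xmm1, xmm0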
3737 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
3738   int dst_enc = dst->encoding();
3739   int src_enc = src->encoding();
3740   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
3741     Assembler::pcmpeqb(dst, src);
3742   } else if ((dst_enc < 16) && (src_enc < 16)) {
3743     Assembler::pcmpeqb(dst, src);
3744   } else if (src_enc < 16) {
3745     subptr(rsp, 64);
3746     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3747     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3748     Assembler::pcmpeqb(xmm0, src);
3749     movdqu(dst, xmm0);
3750     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3751     addptr(rsp, 64);
3752   } else if (dst_enc < 16) {
3753     subptr(rsp, 64);
3754     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3755     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3756     Assembler::pcmpeqb(dst, xmm0);
3757     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3758     addptr(rsp, 64);
3759   } else {
3760     subptr(rsp, 64);
3761     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3762     subptr(rsp, 64);
3763     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
3764     movdqu(xmm0, src);
3765     movdqu(xmm1, dst);
3766     Assembler::pcmpeqb(xmm1, xmm0);
3767     movdqu(dst, xmm1);
3768     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
3769     addptr(rsp, 64);
3770     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3771     addptr(rsp, 64);
3772   }
3773 }
3774 
3775 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
3776   int dst_enc = dst->encoding();
3777   int src_enc = src->encoding();
3778   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
3779     Assembler::pcmpeqw(dst, src);
3780   } else if ((dst_enc < 16) && (src_enc < 16)) {
3781     Assembler::pcmpeqw(dst, src);
3782   } else if (src_enc < 16) {
3783     subptr(rsp, 64);
3784     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3785     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3786     Assembler::pcmpeqw(xmm0, src);
3787     movdqu(dst, xmm0);
3788     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3789     addptr(rsp, 64);
3790   } else if (dst_enc < 16) {
3791     subptr(rsp, 64);
3792     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3793     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3794     Assembler::pcmpeqw(dst, xmm0);
3795     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3796     addptr(rsp, 64);
3797   } else {
3798     subptr(rsp, 64);
3799     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3800     subptr(rsp, 64);
3801     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
3802     movdqu(xmm0, src);
3803     movdqu(xmm1, dst);
3804     Assembler::pcmpeqw(xmm1, xmm0);
3805     movdqu(dst, xmm1);
3806     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
3807     addptr(rsp, 64);
3808     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3809     addptr(rsp, 64);
3810   }
3811 }
3812 
3813 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
3814   int dst_enc = dst->encoding();
3815   if (dst_enc < 16) {
3816     Assembler::pcmpestri(dst, src, imm8);
3817   } else {
3818     subptr(rsp, 64);
3819     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3820     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3821     Assembler::pcmpestri(xmm0, src, imm8);
3822     movdqu(dst, xmm0);
3823     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3824     addptr(rsp, 64);
3825   }
3826 }
3827 
3828 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
3829   int dst_enc = dst->encoding();
3830   int src_enc = src->encoding();
3831   if ((dst_enc < 16) && (src_enc < 16)) {
3832     Assembler::pcmpestri(dst, src, imm8);
3833   } else if (src_enc < 16) {
3834     subptr(rsp, 64);
3835     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3836     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3837     Assembler::pcmpestri(xmm0, src, imm8);
3838     movdqu(dst, xmm0);
3839     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3840     addptr(rsp, 64);
3841   } else if (dst_enc < 16) {
3842     subptr(rsp, 64);
3843     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3844     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3845     Assembler::pcmpestri(dst, xmm0, imm8);
3846     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3847     addptr(rsp, 64);
3848   } else {
3849     subptr(rsp, 64);
3850     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3851     subptr(rsp, 64);
3852     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
3853     movdqu(xmm0, src);
3854     movdqu(xmm1, dst);
3855     Assembler::pcmpestri(xmm1, xmm0, imm8);
3856     movdqu(dst, xmm1);
3857     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
3858     addptr(rsp, 64);
3859     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3860     addptr(rsp, 64);
3861   }
3862 }
3863 
3864 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
3865   int dst_enc = dst->encoding();
3866   int src_enc = src->encoding();
3867   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
3868     Assembler::pmovzxbw(dst, src);
3869   } else if ((dst_enc < 16) && (src_enc < 16)) {
3870     Assembler::pmovzxbw(dst, src);
3871   } else if (src_enc < 16) {
3872     subptr(rsp, 64);
3873     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3874     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3875     Assembler::pmovzxbw(xmm0, src);
3876     movdqu(dst, xmm0);
3877     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3878     addptr(rsp, 64);
3879   } else if (dst_enc < 16) {
3880     subptr(rsp, 64);
3881     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3882     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3883     Assembler::pmovzxbw(dst, xmm0);
3884     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3885     addptr(rsp, 64);
3886   } else {
3887     subptr(rsp, 64);
3888     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3889     subptr(rsp, 64);
3890     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
3891     movdqu(xmm0, src);
3892     movdqu(xmm1, dst);
3893     Assembler::pmovzxbw(xmm1, xmm0);
3894     movdqu(dst, xmm1);
3895     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
3896     addptr(rsp, 64);
3897     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3898     addptr(rsp, 64);
3899   }
3900 }
3901 
3902 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
3903   int dst_enc = dst->encoding();
3904   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
3905     Assembler::pmovzxbw(dst, src);
3906   } else if (dst_enc < 16) {
3907     Assembler::pmovzxbw(dst, src);
3908   } else {
3909     subptr(rsp, 64);
3910     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3911     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3912     Assembler::pmovzxbw(xmm0, src);
3913     movdqu(dst, xmm0);
3914     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3915     addptr(rsp, 64);
3916   }
3917 }
3918 
3919 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
3920   int src_enc = src->encoding();
3921   if (src_enc < 16) {
3922     Assembler::pmovmskb(dst, src);
3923   } else {
3924     subptr(rsp, 64);
3925     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3926     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3927     Assembler::pmovmskb(dst, xmm0);
3928     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3929     addptr(rsp, 64);
3930   }
3931 }
3932 
3933 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
3934   int dst_enc = dst->encoding();
3935   int src_enc = src->encoding();
3936   if ((dst_enc < 16) && (src_enc < 16)) {
3937     Assembler::ptest(dst, src);
3938   } else if (src_enc < 16) {
3939     subptr(rsp, 64);
3940     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3941     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
3942     Assembler::ptest(xmm0, src);
3943     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3944     addptr(rsp, 64);
3945   } else if (dst_enc < 16) {
3946     subptr(rsp, 64);
3947     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3948     evmovdqul(xmm0, src, Assembler::AVX_512bit);
3949     Assembler::ptest(dst, xmm0);
3950     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3951     addptr(rsp, 64);
3952   } else {
3953     subptr(rsp, 64);
3954     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
3955     subptr(rsp, 64);
3956     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
3957     movdqu(xmm0, src);
3958     movdqu(xmm1, dst);
3959     Assembler::ptest(xmm1, xmm0);
3960     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
3961     addptr(rsp, 64);
3962     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
3963     addptr(rsp, 64);
3964   }
3965 }
3966 
3967 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3968   if (reachable(src)) {
3969     Assembler::sqrtsd(dst, as_Address(src));
3970   } else {
3971     lea(rscratch1, src);
3972     Assembler::sqrtsd(dst, Address(rscratch1, 0));
3973   }
3974 }
3975 
3976 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3977   if (reachable(src)) {
3978     Assembler::sqrtss(dst, as_Address(src));
3979   } else {
3980     lea(rscratch1, src);
3981     Assembler::sqrtss(dst, Address(rscratch1, 0));
3982   }
3983 }
3984 
3985 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3986   if (reachable(src)) {
3987     Assembler::subsd(dst, as_Address(src));
3988   } else {
3989     lea(rscratch1, src);
3990     Assembler::subsd(dst, Address(rscratch1, 0));
3991   }
3992 }
3993 
3994 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3995   if (reachable(src)) {
3996     Assembler::subss(dst, as_Address(src));
3997   } else {
3998     lea(rscratch1, src);
3999     Assembler::subss(dst, Address(rscratch1, 0));
4000   }
4001 }
4002 
4003 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
4004   if (reachable(src)) {
4005     Assembler::ucomisd(dst, as_Address(src));
4006   } else {
4007     lea(rscratch1, src);
4008     Assembler::ucomisd(dst, Address(rscratch1, 0));
4009   }
4010 }
4011 
4012 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
4013   if (reachable(src)) {
4014     Assembler::ucomiss(dst, as_Address(src));
4015   } else {
4016     lea(rscratch1, src);
4017     Assembler::ucomiss(dst, Address(rscratch1, 0));
4018   }
4019 }
4020 
4021 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
4022   // Used in sign-bit flipping with aligned address.
4023   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
4024   if (reachable(src)) {
4025     Assembler::xorpd(dst, as_Address(src));
4026   } else {
4027     lea(rscratch1, src);
4028     Assembler::xorpd(dst, Address(rscratch1, 0));
4029   }
4030 }
4031 
4032 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
4033   if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
4034     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
4035   } else {
4037     Assembler::xorpd(dst, src);
4038   }
4039 }
4040 
4041 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
4042   if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
4043     Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
4044   } else {
4045     Assembler::xorps(dst, src);
4046   }
4047 }
4048 
4049 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
4050   // Used in sign-bit flipping with aligned address.
4051   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
4052   if (reachable(src)) {
4053     Assembler::xorps(dst, as_Address(src));
4054   } else {
4055     lea(rscratch1, src);
4056     Assembler::xorps(dst, Address(rscratch1, 0));
4057   }
4058 }
4059 
4060 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
4061   // Used with 16-byte-aligned in-memory shuffle-mask constants.
4062   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
4063   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
4064   if (reachable(src)) {
4065     Assembler::pshufb(dst, as_Address(src));
4066   } else {
4067     lea(rscratch1, src);
4068     Assembler::pshufb(dst, Address(rscratch1, 0));
4069   }
4070 }
4071 
4072 // AVX 3-operands instructions
4073 
4074 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4075   if (reachable(src)) {
4076     vaddsd(dst, nds, as_Address(src));
4077   } else {
4078     lea(rscratch1, src);
4079     vaddsd(dst, nds, Address(rscratch1, 0));
4080   }
4081 }
4082 
4083 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4084   if (reachable(src)) {
4085     vaddss(dst, nds, as_Address(src));
4086   } else {
4087     lea(rscratch1, src);
4088     vaddss(dst, nds, Address(rscratch1, 0));
4089   }
4090 }
4091 
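// vabsss/vabssd compute an absolute value by ANDing with a mask constant;
// negate_field is expected to point at a sign-mask (e.g. 0x7FFFFFFF lanes
// for vandps) so the AND clears the sign bit. The register juggling below
// routes upper-bank operands through xmm0, as in the wrappers above.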
4092 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
4093   int dst_enc = dst->encoding();
4094   int nds_enc = nds->encoding();
4095   int src_enc = src->encoding();
4096   if ((dst_enc < 16) && (nds_enc < 16)) {
4097     vandps(dst, nds, negate_field, vector_len);
4098   } else if ((src_enc < 16) && (dst_enc < 16)) {
4099     movss(src, nds);
4100     vandps(dst, src, negate_field, vector_len);
4101   } else if (src_enc < 16) {
4102     movss(src, nds);
4103     vandps(src, src, negate_field, vector_len);
4104     movss(dst, src);
4105   } else if (dst_enc < 16) {
4106     movdqu(src, xmm0);
4107     movss(xmm0, nds);
4108     vandps(dst, xmm0, negate_field, vector_len);
4109     movdqu(xmm0, src);
4110   } else if (nds_enc < 16) {
4111     movdqu(src, xmm0);
4112     vandps(xmm0, nds, negate_field, vector_len);
4113     movss(dst, xmm0);
4114     movdqu(xmm0, src);
4115   } else {
4116     movdqu(src, xmm0);
4117     movss(xmm0, nds);
4118     vandps(xmm0, xmm0, negate_field, vector_len);
4119     movss(dst, xmm0);
4120     movdqu(xmm0, src);
4121   }
4122 }
4123 
4124 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
4125   int dst_enc = dst->encoding();
4126   int nds_enc = nds->encoding();
4127   int src_enc = src->encoding();
4128   if ((dst_enc < 16) && (nds_enc < 16)) {
4129     vandpd(dst, nds, negate_field, vector_len);
4130   } else if ((src_enc < 16) && (dst_enc < 16)) {
4131     movsd(src, nds);
4132     vandpd(dst, src, negate_field, vector_len);
4133   } else if (src_enc < 16) {
4134     movsd(src, nds);
4135     vandpd(src, src, negate_field, vector_len);
4136     movsd(dst, src);
4137   } else if (dst_enc < 16) {
4138     movdqu(src, xmm0);
4139     movsd(xmm0, nds);
4140     vandpd(dst, xmm0, negate_field, vector_len);
4141     movdqu(xmm0, src);
4142   } else if (nds_enc < 16) {
4143     movdqu(src, xmm0);
4144     vandpd(xmm0, nds, negate_field, vector_len);
4145     movsd(dst, xmm0);
4146     movdqu(xmm0, src);
4147   } else {
4148     movdqu(src, xmm0);
4149     movsd(xmm0, nds);
4150     vandpd(xmm0, xmm0, negate_field, vector_len);
4151     movsd(dst, xmm0);
4152     movdqu(xmm0, src);
4153   }
4154 }
4155 
4156 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4157   int dst_enc = dst->encoding();
4158   int nds_enc = nds->encoding();
4159   int src_enc = src->encoding();
4160   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4161     Assembler::vpaddb(dst, nds, src, vector_len);
4162   } else if ((dst_enc < 16) && (src_enc < 16)) {
4163     Assembler::vpaddb(dst, dst, src, vector_len);
4164   } else if ((dst_enc < 16) && (nds_enc < 16)) {
4165     // use nds as scratch for src
4166     evmovdqul(nds, src, Assembler::AVX_512bit);
4167     Assembler::vpaddb(dst, dst, nds, vector_len);
4168   } else if ((src_enc < 16) && (nds_enc < 16)) {
4169     // use nds as scratch for dst
4170     evmovdqul(nds, dst, Assembler::AVX_512bit);
4171     Assembler::vpaddb(nds, nds, src, vector_len);
4172     evmovdqul(dst, nds, Assembler::AVX_512bit);
4173   } else if (dst_enc < 16) {
4174     // use nds as scratch for xmm0 to hold src
4175     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4176     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4177     Assembler::vpaddb(dst, dst, xmm0, vector_len);
4178     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4179   } else {
4180     // worst-case scenario: all regs are in the upper bank
4181     subptr(rsp, 64);
4182     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4183     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4184     evmovdqul(xmm1, src, Assembler::AVX_512bit);
4185     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4186     Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len);
4187     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4188     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4189     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4190     addptr(rsp, 64);
4191   }
4192 }
4193 
4194 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4195   int dst_enc = dst->encoding();
4196   int nds_enc = nds->encoding();
4197   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4198     Assembler::vpaddb(dst, nds, src, vector_len);
4199   } else if (dst_enc < 16) {
4200     Assembler::vpaddb(dst, dst, src, vector_len);
4201   } else if (nds_enc < 16) {
4202     // dst_enc is in the upper bank; use nds as scratch for dst
4203     evmovdqul(nds, dst, Assembler::AVX_512bit);
4204     Assembler::vpaddb(nds, nds, src, vector_len);
4205     evmovdqul(dst, nds, Assembler::AVX_512bit);
4206   } else {
4207     // worst-case scenario: all regs in upper bank
4208     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4209     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4210     Assembler::vpaddb(xmm0, xmm0, src, vector_len);
4211     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4212   }
4213 }
4214 
4215 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4216   int dst_enc = dst->encoding();
4217   int nds_enc = nds->encoding();
4218   int src_enc = src->encoding();
4219   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4220     Assembler::vpaddw(dst, nds, src, vector_len);
4221   } else if ((dst_enc < 16) && (src_enc < 16)) {
4222     Assembler::vpaddw(dst, dst, src, vector_len);
4223   } else if ((dst_enc < 16) && (nds_enc < 16)) {
4224     // use nds as scratch for src
4225     evmovdqul(nds, src, Assembler::AVX_512bit);
4226     Assembler::vpaddw(dst, dst, nds, vector_len);
4227   } else if ((src_enc < 16) && (nds_enc < 16)) {
4228     // use nds as scratch for dst
4229     evmovdqul(nds, dst, Assembler::AVX_512bit);
4230     Assembler::vpaddw(nds, nds, src, vector_len);
4231     evmovdqul(dst, nds, Assembler::AVX_512bit);
4232   } else if (dst_enc < 16) {
4233     // use nds as scratch for xmm0 to hold src
4234     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4235     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4236     Assembler::vpaddw(dst, dst, xmm0, vector_len);
4237     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4238   } else {
4239     // worst-case scenario: all regs are in the upper bank
4240     subptr(rsp, 64);
4241     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4242     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4243     evmovdqul(xmm1, src, Assembler::AVX_512bit);
4244     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4245     Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len);
4246     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4247     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4248     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4249     addptr(rsp, 64);
4250   }
4251 }
4252 
4253 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4254   int dst_enc = dst->encoding();
4255   int nds_enc = nds->encoding();
4256   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4257     Assembler::vpaddw(dst, nds, src, vector_len);
4258   } else if (dst_enc < 16) {
4259     Assembler::vpaddw(dst, dst, src, vector_len);
4260   } else if (nds_enc < 16) {
4261     // dst_enc is in the upper bank; use nds as scratch for dst
4262     evmovdqul(nds, dst, Assembler::AVX_512bit);
4263     Assembler::vpaddw(nds, nds, src, vector_len);
4264     evmovdqul(dst, nds, Assembler::AVX_512bit);
4265   } else {
4266     // worst-case scenario: all regs in upper bank
4267     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4268     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4269     Assembler::vpaddw(xmm0, xmm0, src, vector_len);
4270     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4271   }
4272 }
4273 
4274 void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
4275   int dst_enc = dst->encoding();
4276   int src_enc = src->encoding();
4277   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4278     Assembler::vpbroadcastw(dst, src);
4279   } else if ((dst_enc < 16) && (src_enc < 16)) {
4280     Assembler::vpbroadcastw(dst, src);
4281   } else if (src_enc < 16) {
4282     subptr(rsp, 64);
4283     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4284     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4285     Assembler::vpbroadcastw(xmm0, src);
4286     movdqu(dst, xmm0);
4287     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4288     addptr(rsp, 64);
4289   } else if (dst_enc < 16) {
4290     subptr(rsp, 64);
4291     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4292     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4293     Assembler::vpbroadcastw(dst, xmm0);
4294     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4295     addptr(rsp, 64);
4296   } else {
4297     subptr(rsp, 64);
4298     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4299     subptr(rsp, 64);
4300     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4301     movdqu(xmm0, src);
4302     movdqu(xmm1, dst);
4303     Assembler::vpbroadcastw(xmm1, xmm0);
4304     movdqu(dst, xmm1);
4305     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4306     addptr(rsp, 64);
4307     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4308     addptr(rsp, 64);
4309   }
4310 }
4311 
4312 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4313   int dst_enc = dst->encoding();
4314   int nds_enc = nds->encoding();
4315   int src_enc = src->encoding();
4316   assert(dst_enc == nds_enc, "dst and nds must be the same register");
4317   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4318     Assembler::vpcmpeqb(dst, nds, src, vector_len);
4319   } else if ((dst_enc < 16) && (src_enc < 16)) {
4320     Assembler::vpcmpeqb(dst, nds, src, vector_len);
4321   } else if (src_enc < 16) {
4322     subptr(rsp, 64);
4323     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4324     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4325     Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len);
4326     movdqu(dst, xmm0);
4327     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4328     addptr(rsp, 64);
4329   } else if (dst_enc < 16) {
4330     subptr(rsp, 64);
4331     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4332     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4333     Assembler::vpcmpeqb(dst, dst, xmm0, vector_len);
4334     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4335     addptr(rsp, 64);
4336   } else {
4337     subptr(rsp, 64);
4338     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4339     subptr(rsp, 64);
4340     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4341     movdqu(xmm0, src);
4342     movdqu(xmm1, dst);
4343     Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len);
4344     movdqu(dst, xmm1);
4345     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4346     addptr(rsp, 64);
4347     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4348     addptr(rsp, 64);
4349   }
4350 }
4351 
4352 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4353   int dst_enc = dst->encoding();
4354   int nds_enc = nds->encoding();
4355   int src_enc = src->encoding();
  assert(dst_enc == nds_enc, "dst and nds must be the same register");
4357   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4358     Assembler::vpcmpeqw(dst, nds, src, vector_len);
4359   } else if ((dst_enc < 16) && (src_enc < 16)) {
4360     Assembler::vpcmpeqw(dst, nds, src, vector_len);
4361   } else if (src_enc < 16) {
4362     subptr(rsp, 64);
4363     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4364     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4365     Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len);
4366     movdqu(dst, xmm0);
4367     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4368     addptr(rsp, 64);
4369   } else if (dst_enc < 16) {
4370     subptr(rsp, 64);
4371     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4372     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4373     Assembler::vpcmpeqw(dst, dst, xmm0, vector_len);
4374     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4375     addptr(rsp, 64);
4376   } else {
4377     subptr(rsp, 64);
4378     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4379     subptr(rsp, 64);
4380     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4381     movdqu(xmm0, src);
4382     movdqu(xmm1, dst);
4383     Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len);
4384     movdqu(dst, xmm1);
4385     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4386     addptr(rsp, 64);
4387     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4388     addptr(rsp, 64);
4389   }
4390 }
4391 
4392 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
4393   int dst_enc = dst->encoding();
4394   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4395     Assembler::vpmovzxbw(dst, src, vector_len);
4396   } else if (dst_enc < 16) {
4397     Assembler::vpmovzxbw(dst, src, vector_len);
4398   } else {
4399     subptr(rsp, 64);
4400     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4401     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4402     Assembler::vpmovzxbw(xmm0, src, vector_len);
4403     movdqu(dst, xmm0);
4404     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4405     addptr(rsp, 64);
4406   }
4407 }
4408 
4409 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src) {
4410   int src_enc = src->encoding();
4411   if (src_enc < 16) {
4412     Assembler::vpmovmskb(dst, src);
4413   } else {
4414     subptr(rsp, 64);
4415     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4416     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4417     Assembler::vpmovmskb(dst, xmm0);
4418     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4419     addptr(rsp, 64);
4420   }
4421 }
4422 
4423 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4424   int dst_enc = dst->encoding();
4425   int nds_enc = nds->encoding();
4426   int src_enc = src->encoding();
4427   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4428     Assembler::vpmullw(dst, nds, src, vector_len);
4429   } else if ((dst_enc < 16) && (src_enc < 16)) {
4430     Assembler::vpmullw(dst, dst, src, vector_len);
4431   } else if ((dst_enc < 16) && (nds_enc < 16)) {
4432     // use nds as scratch for src
4433     evmovdqul(nds, src, Assembler::AVX_512bit);
4434     Assembler::vpmullw(dst, dst, nds, vector_len);
4435   } else if ((src_enc < 16) && (nds_enc < 16)) {
4436     // use nds as scratch for dst
4437     evmovdqul(nds, dst, Assembler::AVX_512bit);
4438     Assembler::vpmullw(nds, nds, src, vector_len);
4439     evmovdqul(dst, nds, Assembler::AVX_512bit);
4440   } else if (dst_enc < 16) {
    // use nds to save a copy of xmm0 and hold src
4442     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4443     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4444     Assembler::vpmullw(dst, dst, xmm0, vector_len);
4445     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4446   } else {
    // worst case scenario, all regs are in the upper bank
4448     subptr(rsp, 64);
4449     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4450     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4451     evmovdqul(xmm1, src, Assembler::AVX_512bit);
4452     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4453     Assembler::vpmullw(xmm0, xmm0, xmm1, vector_len);
4454     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4455     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4456     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4457     addptr(rsp, 64);
4458   }
4459 }
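
// The register-register wrappers above pick the cheapest rewrite available,
// in decreasing order of preference:
//   1. dst and src both low          -> emit directly (dst doubles as nds)
//   2. nds low, one operand high     -> use nds as the scratch copy
//   3. only dst low                  -> borrow xmm0, parked in nds meanwhile
//   4. everything in the upper bank  -> spill xmm1, borrow xmm0 and xmm1
// Each rung exists to avoid a stack round-trip when a dead low register is
// already at hand.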
4460 
4461 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4462   int dst_enc = dst->encoding();
4463   int nds_enc = nds->encoding();
4464   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4465     Assembler::vpmullw(dst, nds, src, vector_len);
4466   } else if (dst_enc < 16) {
4467     Assembler::vpmullw(dst, dst, src, vector_len);
4468   } else if (nds_enc < 16) {
    // implies dst_enc is in the upper bank, with nds as scratch
4470     evmovdqul(nds, dst, Assembler::AVX_512bit);
4471     Assembler::vpmullw(nds, nds, src, vector_len);
4472     evmovdqul(dst, nds, Assembler::AVX_512bit);
4473   } else {
    // worst case scenario, all regs in upper bank
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpmullw(xmm0, xmm0, src, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4479   }
4480 }
4481 
4482 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4483   int dst_enc = dst->encoding();
4484   int nds_enc = nds->encoding();
4485   int src_enc = src->encoding();
4486   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4487     Assembler::vpsubb(dst, nds, src, vector_len);
4488   } else if ((dst_enc < 16) && (src_enc < 16)) {
4489     Assembler::vpsubb(dst, dst, src, vector_len);
4490   } else if ((dst_enc < 16) && (nds_enc < 16)) {
4491     // use nds as scratch for src
4492     evmovdqul(nds, src, Assembler::AVX_512bit);
4493     Assembler::vpsubb(dst, dst, nds, vector_len);
4494   } else if ((src_enc < 16) && (nds_enc < 16)) {
4495     // use nds as scratch for dst
4496     evmovdqul(nds, dst, Assembler::AVX_512bit);
4497     Assembler::vpsubb(nds, nds, src, vector_len);
4498     evmovdqul(dst, nds, Assembler::AVX_512bit);
4499   } else if (dst_enc < 16) {
    // use nds to save a copy of xmm0 and hold src
4501     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4502     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4503     Assembler::vpsubb(dst, dst, xmm0, vector_len);
4504     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4505   } else {
    // worst case scenario, all regs are in the upper bank
4507     subptr(rsp, 64);
4508     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4509     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4510     evmovdqul(xmm1, src, Assembler::AVX_512bit);
4511     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4512     Assembler::vpsubb(xmm0, xmm0, xmm1, vector_len);
4513     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4514     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4515     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4516     addptr(rsp, 64);
4517   }
4518 }
4519 
4520 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4521   int dst_enc = dst->encoding();
4522   int nds_enc = nds->encoding();
4523   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4524     Assembler::vpsubb(dst, nds, src, vector_len);
4525   } else if (dst_enc < 16) {
4526     Assembler::vpsubb(dst, dst, src, vector_len);
4527   } else if (nds_enc < 16) {
    // implies dst_enc is in the upper bank, with nds as scratch
4529     evmovdqul(nds, dst, Assembler::AVX_512bit);
4530     Assembler::vpsubb(nds, nds, src, vector_len);
4531     evmovdqul(dst, nds, Assembler::AVX_512bit);
4532   } else {
    // worst case scenario, all regs in upper bank
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsubb(xmm0, xmm0, src, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4538   }
4539 }
4540 
4541 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
4542   int dst_enc = dst->encoding();
4543   int nds_enc = nds->encoding();
4544   int src_enc = src->encoding();
4545   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4546     Assembler::vpsubw(dst, nds, src, vector_len);
4547   } else if ((dst_enc < 16) && (src_enc < 16)) {
4548     Assembler::vpsubw(dst, dst, src, vector_len);
4549   } else if ((dst_enc < 16) && (nds_enc < 16)) {
4550     // use nds as scratch for src
4551     evmovdqul(nds, src, Assembler::AVX_512bit);
4552     Assembler::vpsubw(dst, dst, nds, vector_len);
4553   } else if ((src_enc < 16) && (nds_enc < 16)) {
4554     // use nds as scratch for dst
4555     evmovdqul(nds, dst, Assembler::AVX_512bit);
4556     Assembler::vpsubw(nds, nds, src, vector_len);
4557     evmovdqul(dst, nds, Assembler::AVX_512bit);
4558   } else if (dst_enc < 16) {
    // use nds to save a copy of xmm0 and hold src
4560     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4561     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4562     Assembler::vpsubw(dst, dst, xmm0, vector_len);
4563     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4564   } else {
    // worst case scenario, all regs are in the upper bank
4566     subptr(rsp, 64);
4567     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4568     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4569     evmovdqul(xmm1, src, Assembler::AVX_512bit);
4570     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4571     Assembler::vpsubw(xmm0, xmm0, xmm1, vector_len);
4572     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4573     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4574     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4575     addptr(rsp, 64);
4576   }
4577 }
4578 
4579 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
4580   int dst_enc = dst->encoding();
4581   int nds_enc = nds->encoding();
4582   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4583     Assembler::vpsubw(dst, nds, src, vector_len);
4584   } else if (dst_enc < 16) {
4585     Assembler::vpsubw(dst, dst, src, vector_len);
4586   } else if (nds_enc < 16) {
    // implies dst_enc is in the upper bank, with nds as scratch
4588     evmovdqul(nds, dst, Assembler::AVX_512bit);
4589     Assembler::vpsubw(nds, nds, src, vector_len);
4590     evmovdqul(dst, nds, Assembler::AVX_512bit);
4591   } else {
    // worst case scenario, all regs in upper bank
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsubw(xmm0, xmm0, src, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4597   }
4598 }
4599 
4600 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
4601   int dst_enc = dst->encoding();
4602   int nds_enc = nds->encoding();
4603   int shift_enc = shift->encoding();
4604   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4605     Assembler::vpsraw(dst, nds, shift, vector_len);
4606   } else if ((dst_enc < 16) && (shift_enc < 16)) {
4607     Assembler::vpsraw(dst, dst, shift, vector_len);
4608   } else if ((dst_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch to hold shift
4610     evmovdqul(nds, shift, Assembler::AVX_512bit);
4611     Assembler::vpsraw(dst, dst, nds, vector_len);
4612   } else if ((shift_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for dst
4614     evmovdqul(nds, dst, Assembler::AVX_512bit);
4615     Assembler::vpsraw(nds, nds, shift, vector_len);
4616     evmovdqul(dst, nds, Assembler::AVX_512bit);
4617   } else if (dst_enc < 16) {
4618     // use nds to save a copy of xmm0 and hold shift
4619     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4620     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4621     Assembler::vpsraw(dst, dst, xmm0, vector_len);
4622     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4623   } else if (nds_enc < 16) {
    // use nds and dst as temps
4625     evmovdqul(nds, dst, Assembler::AVX_512bit);
4626     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4627     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4628     Assembler::vpsraw(nds, nds, xmm0, vector_len);
4629     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4630     evmovdqul(dst, nds, Assembler::AVX_512bit);
4631   } else {
    // worst case scenario, all regs are in the upper bank
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm1, shift, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsraw(xmm0, xmm0, xmm1, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
4644   }
4645 }
4646 
4647 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
4648   int dst_enc = dst->encoding();
4649   int nds_enc = nds->encoding();
4650   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4651     Assembler::vpsraw(dst, nds, shift, vector_len);
4652   } else if (dst_enc < 16) {
4653     Assembler::vpsraw(dst, dst, shift, vector_len);
4654   } else if (nds_enc < 16) {
4655     // use nds as scratch
4656     evmovdqul(nds, dst, Assembler::AVX_512bit);
4657     Assembler::vpsraw(nds, nds, shift, vector_len);
4658     evmovdqul(dst, nds, Assembler::AVX_512bit);
4659   } else {
    // use nds to save a copy of xmm0
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsraw(xmm0, xmm0, shift, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4665   }
4666 }
4667 
4668 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
4669   int dst_enc = dst->encoding();
4670   int nds_enc = nds->encoding();
4671   int shift_enc = shift->encoding();
4672   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4673     Assembler::vpsrlw(dst, nds, shift, vector_len);
4674   } else if ((dst_enc < 16) && (shift_enc < 16)) {
4675     Assembler::vpsrlw(dst, dst, shift, vector_len);
4676   } else if ((dst_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch to hold shift
4678     evmovdqul(nds, shift, Assembler::AVX_512bit);
4679     Assembler::vpsrlw(dst, dst, nds, vector_len);
4680   } else if ((shift_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for dst
4682     evmovdqul(nds, dst, Assembler::AVX_512bit);
4683     Assembler::vpsrlw(nds, nds, shift, vector_len);
4684     evmovdqul(dst, nds, Assembler::AVX_512bit);
4685   } else if (dst_enc < 16) {
4686     // use nds to save a copy of xmm0 and hold shift
4687     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4688     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4689     Assembler::vpsrlw(dst, dst, xmm0, vector_len);
4690     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4691   } else if (nds_enc < 16) {
    // use nds and dst as temps
4693     evmovdqul(nds, dst, Assembler::AVX_512bit);
4694     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4695     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4696     Assembler::vpsrlw(nds, nds, xmm0, vector_len);
4697     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4698     evmovdqul(dst, nds, Assembler::AVX_512bit);
4699   } else {
    // worst case scenario, all regs are in the upper bank
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm1, shift, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsrlw(xmm0, xmm0, xmm1, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
4712   }
4713 }
4714 
4715 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
4716   int dst_enc = dst->encoding();
4717   int nds_enc = nds->encoding();
4718   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4719     Assembler::vpsrlw(dst, nds, shift, vector_len);
4720   } else if (dst_enc < 16) {
4721     Assembler::vpsrlw(dst, dst, shift, vector_len);
4722   } else if (nds_enc < 16) {
4723     // use nds as scratch
4724     evmovdqul(nds, dst, Assembler::AVX_512bit);
4725     Assembler::vpsrlw(nds, nds, shift, vector_len);
4726     evmovdqul(dst, nds, Assembler::AVX_512bit);
4727   } else {
    // use nds to save a copy of xmm0
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsrlw(xmm0, xmm0, shift, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4733   }
4734 }
4735 
4736 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
4737   int dst_enc = dst->encoding();
4738   int nds_enc = nds->encoding();
4739   int shift_enc = shift->encoding();
4740   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4741     Assembler::vpsllw(dst, nds, shift, vector_len);
4742   } else if ((dst_enc < 16) && (shift_enc < 16)) {
4743     Assembler::vpsllw(dst, dst, shift, vector_len);
4744   } else if ((dst_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch to hold shift
4746     evmovdqul(nds, shift, Assembler::AVX_512bit);
4747     Assembler::vpsllw(dst, dst, nds, vector_len);
4748   } else if ((shift_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for dst
4750     evmovdqul(nds, dst, Assembler::AVX_512bit);
4751     Assembler::vpsllw(nds, nds, shift, vector_len);
4752     evmovdqul(dst, nds, Assembler::AVX_512bit);
4753   } else if (dst_enc < 16) {
4754     // use nds to save a copy of xmm0 and hold shift
4755     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
4756     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4757     Assembler::vpsllw(dst, dst, xmm0, vector_len);
4758     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4759   } else if (nds_enc < 16) {
    // use nds and dst as temps
4761     evmovdqul(nds, dst, Assembler::AVX_512bit);
4762     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4763     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
4764     Assembler::vpsllw(nds, nds, xmm0, vector_len);
4765     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4766     evmovdqul(dst, nds, Assembler::AVX_512bit);
4767   } else {
    // worst case scenario, all regs are in the upper bank
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm1, shift, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
4780   }
4781 }
4782 
4783 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
4784   int dst_enc = dst->encoding();
4785   int nds_enc = nds->encoding();
4786   if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
4787     Assembler::vpsllw(dst, nds, shift, vector_len);
4788   } else if (dst_enc < 16) {
4789     Assembler::vpsllw(dst, dst, shift, vector_len);
4790   } else if (nds_enc < 16) {
4791     // use nds as scratch
4792     evmovdqul(nds, dst, Assembler::AVX_512bit);
4793     Assembler::vpsllw(nds, nds, shift, vector_len);
4794     evmovdqul(dst, nds, Assembler::AVX_512bit);
4795   } else {
    // use nds to save a copy of xmm0
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpsllw(xmm0, xmm0, shift, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
4801   }
4802 }
4803 
4804 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
4805   int dst_enc = dst->encoding();
4806   int src_enc = src->encoding();
4807   if ((dst_enc < 16) && (src_enc < 16)) {
4808     Assembler::vptest(dst, src);
4809   } else if (src_enc < 16) {
4810     subptr(rsp, 64);
4811     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4812     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4813     Assembler::vptest(xmm0, src);
4814     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4815     addptr(rsp, 64);
4816   } else if (dst_enc < 16) {
4817     subptr(rsp, 64);
4818     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4819     evmovdqul(xmm0, src, Assembler::AVX_512bit);
4820     Assembler::vptest(dst, xmm0);
4821     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4822     addptr(rsp, 64);
4823   } else {
4824     subptr(rsp, 64);
4825     evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4826     subptr(rsp, 64);
4827     evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4828     movdqu(xmm0, src);
4829     movdqu(xmm1, dst);
4830     Assembler::vptest(xmm1, xmm0);
4831     evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4832     addptr(rsp, 64);
4833     evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4834     addptr(rsp, 64);
4835   }
4836 }
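
// vptest only sets flags (ZF/CF); there is no vector result to copy back.
// That is why the fallback paths above merely stage the inputs into low
// registers, in contrast to the arithmetic wrappers, which must also move
// the destination's new value out again.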
4837 
// This instruction is emitted from within other macro sequences, so we cannot
// control which registers it receives when expanded through those patterns.
4840 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
4841   if (VM_Version::supports_avx512nobw()) {
4842     int dst_enc = dst->encoding();
4843     int src_enc = src->encoding();
4844     if (dst_enc == src_enc) {
4845       if (dst_enc < 16) {
4846         Assembler::punpcklbw(dst, src);
4847       } else {
4848         subptr(rsp, 64);
4849         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4850         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4851         Assembler::punpcklbw(xmm0, xmm0);
4852         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4853         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4854         addptr(rsp, 64);
4855       }
4856     } else {
4857       if ((src_enc < 16) && (dst_enc < 16)) {
4858         Assembler::punpcklbw(dst, src);
4859       } else if (src_enc < 16) {
4860         subptr(rsp, 64);
4861         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4862         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4863         Assembler::punpcklbw(xmm0, src);
4864         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4865         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4866         addptr(rsp, 64);
4867       } else if (dst_enc < 16) {
4868         subptr(rsp, 64);
4869         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4870         evmovdqul(xmm0, src, Assembler::AVX_512bit);
4871         Assembler::punpcklbw(dst, xmm0);
4872         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4873         addptr(rsp, 64);
4874       } else {
4875         subptr(rsp, 64);
4876         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4877         subptr(rsp, 64);
4878         evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4879         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4880         evmovdqul(xmm1, src, Assembler::AVX_512bit);
4881         Assembler::punpcklbw(xmm0, xmm1);
4882         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4883         evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4884         addptr(rsp, 64);
4885         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4886         addptr(rsp, 64);
4887       }
4888     }
4889   } else {
4890     Assembler::punpcklbw(dst, src);
4891   }
4892 }
4893 
// This instruction is emitted from within other macro sequences, so we cannot
// control which registers it receives when expanded through those patterns.
4896 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
4897   if (VM_Version::supports_avx512nobw()) {
4898     int dst_enc = dst->encoding();
4899     int src_enc = src->encoding();
4900     if (dst_enc == src_enc) {
4901       if (dst_enc < 16) {
4902         Assembler::pshuflw(dst, src, mode);
4903       } else {
4904         subptr(rsp, 64);
4905         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4906         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4907         Assembler::pshuflw(xmm0, xmm0, mode);
4908         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4909         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4910         addptr(rsp, 64);
4911       }
4912     } else {
4913       if ((src_enc < 16) && (dst_enc < 16)) {
4914         Assembler::pshuflw(dst, src, mode);
4915       } else if (src_enc < 16) {
4916         subptr(rsp, 64);
4917         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4918         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4919         Assembler::pshuflw(xmm0, src, mode);
4920         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4921         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4922         addptr(rsp, 64);
4923       } else if (dst_enc < 16) {
4924         subptr(rsp, 64);
4925         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4926         evmovdqul(xmm0, src, Assembler::AVX_512bit);
4927         Assembler::pshuflw(dst, xmm0, mode);
4928         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4929         addptr(rsp, 64);
4930       } else {
4931         subptr(rsp, 64);
4932         evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
4933         subptr(rsp, 64);
4934         evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
4935         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
4936         evmovdqul(xmm1, src, Assembler::AVX_512bit);
4937         Assembler::pshuflw(xmm0, xmm1, mode);
4938         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
4939         evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
4940         addptr(rsp, 64);
4941         evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
4942         addptr(rsp, 64);
4943       }
4944     }
4945   } else {
4946     Assembler::pshuflw(dst, src, mode);
4947   }
4948 }
4949 
4950 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4951   if (reachable(src)) {
4952     vandpd(dst, nds, as_Address(src), vector_len);
4953   } else {
4954     lea(rscratch1, src);
4955     vandpd(dst, nds, Address(rscratch1, 0), vector_len);
4956   }
4957 }
4958 
4959 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
4960   if (reachable(src)) {
4961     vandps(dst, nds, as_Address(src), vector_len);
4962   } else {
4963     lea(rscratch1, src);
4964     vandps(dst, nds, Address(rscratch1, 0), vector_len);
4965   }
4966 }
4967 
4968 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4969   if (reachable(src)) {
4970     vdivsd(dst, nds, as_Address(src));
4971   } else {
4972     lea(rscratch1, src);
4973     vdivsd(dst, nds, Address(rscratch1, 0));
4974   }
4975 }
4976 
4977 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4978   if (reachable(src)) {
4979     vdivss(dst, nds, as_Address(src));
4980   } else {
4981     lea(rscratch1, src);
4982     vdivss(dst, nds, Address(rscratch1, 0));
4983   }
4984 }
4985 
4986 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4987   if (reachable(src)) {
4988     vmulsd(dst, nds, as_Address(src));
4989   } else {
4990     lea(rscratch1, src);
4991     vmulsd(dst, nds, Address(rscratch1, 0));
4992   }
4993 }
4994 
4995 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4996   if (reachable(src)) {
4997     vmulss(dst, nds, as_Address(src));
4998   } else {
4999     lea(rscratch1, src);
5000     vmulss(dst, nds, Address(rscratch1, 0));
5001   }
5002 }
5003 
5004 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
5005   if (reachable(src)) {
5006     vsubsd(dst, nds, as_Address(src));
5007   } else {
5008     lea(rscratch1, src);
5009     vsubsd(dst, nds, Address(rscratch1, 0));
5010   }
5011 }
5012 
5013 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
5014   if (reachable(src)) {
5015     vsubss(dst, nds, as_Address(src));
5016   } else {
5017     lea(rscratch1, src);
5018     vsubss(dst, nds, Address(rscratch1, 0));
5019   }
5020 }
5021 
5022 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
5023   int nds_enc = nds->encoding();
5024   int dst_enc = dst->encoding();
5025   bool dst_upper_bank = (dst_enc > 15);
5026   bool nds_upper_bank = (nds_enc > 15);
5027   if (VM_Version::supports_avx512novl() &&
5028       (nds_upper_bank || dst_upper_bank)) {
5029     if (dst_upper_bank) {
5030       subptr(rsp, 64);
5031       evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
5032       movflt(xmm0, nds);
5033       vxorps(xmm0, xmm0, src, Assembler::AVX_128bit);
5034       movflt(dst, xmm0);
5035       evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
5036       addptr(rsp, 64);
5037     } else {
5038       movflt(dst, nds);
5039       vxorps(dst, dst, src, Assembler::AVX_128bit);
5040     }
5041   } else {
5042     vxorps(dst, nds, src, Assembler::AVX_128bit);
5043   }
5044 }
5045 
5046 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
5047   int nds_enc = nds->encoding();
5048   int dst_enc = dst->encoding();
5049   bool dst_upper_bank = (dst_enc > 15);
5050   bool nds_upper_bank = (nds_enc > 15);
5051   if (VM_Version::supports_avx512novl() &&
5052       (nds_upper_bank || dst_upper_bank)) {
5053     if (dst_upper_bank) {
5054       subptr(rsp, 64);
5055       evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
5056       movdbl(xmm0, nds);
5057       vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit);
5058       movdbl(dst, xmm0);
5059       evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
5060       addptr(rsp, 64);
5061     } else {
5062       movdbl(dst, nds);
5063       vxorpd(dst, dst, src, Assembler::AVX_128bit);
5064     }
5065   } else {
5066     vxorpd(dst, nds, src, Assembler::AVX_128bit);
5067   }
5068 }
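
// The two negate helpers above rely on the IEEE-754 sign bit: XORing a value
// with a mask that has only the sign bit set flips the sign and leaves the
// exponent and mantissa untouched (no rounding, no exceptions). The
// AddressLiteral src is expected to point at such a mask, e.g. for a double
//
//   -x == x ^ 0x8000000000000000
//
// which is what the vxorps/vxorpd against src computes.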
5069 
5070 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
5071   if (reachable(src)) {
5072     vxorpd(dst, nds, as_Address(src), vector_len);
5073   } else {
5074     lea(rscratch1, src);
5075     vxorpd(dst, nds, Address(rscratch1, 0), vector_len);
5076   }
5077 }
5078 
5079 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
5080   if (reachable(src)) {
5081     vxorps(dst, nds, as_Address(src), vector_len);
5082   } else {
5083     lea(rscratch1, src);
5084     vxorps(dst, nds, Address(rscratch1, 0), vector_len);
5085   }
5086 }
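
// Pattern used by all the AddressLiteral wrappers above: reachable(src) asks
// whether the literal can be addressed with a rip-relative 32-bit
// displacement from the code currently being emitted. If it cannot, the full
// 64-bit address is materialized into rscratch1 with lea and the operand
// becomes an ordinary [rscratch1 + 0] memory reference.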
5087 
5088 
5089 //////////////////////////////////////////////////////////////////////////////////
5090 #if INCLUDE_ALL_GCS
5091 
5092 void MacroAssembler::g1_write_barrier_pre(Register obj,
5093                                           Register pre_val,
5094                                           Register thread,
5095                                           Register tmp,
5096                                           bool tosca_live,
5097                                           bool expand_call) {
5098 
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip the check of _last_sp generated by
  // InterpreterMacroAssembler::call_VM_leaf_base.
5102 
5103 #ifdef _LP64
5104   assert(thread == r15_thread, "must be");
5105 #endif // _LP64
5106 
5107   Label done;
5108   Label runtime;
5109 
5110   assert(pre_val != noreg, "check this code");
5111 
5112   if (obj != noreg) {
5113     assert_different_registers(obj, pre_val, tmp);
5114     assert(pre_val != rax, "check this code");
5115   }
5116 
5117   Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
5118                                        SATBMarkQueue::byte_offset_of_active()));
5119   Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
5120                                        SATBMarkQueue::byte_offset_of_index()));
5121   Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
5122                                        SATBMarkQueue::byte_offset_of_buf()));
5123 
5124 
5125   // Is marking active?
5126   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
5127     cmpl(in_progress, 0);
5128   } else {
5129     assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
5130     cmpb(in_progress, 0);
5131   }
5132   jcc(Assembler::equal, done);
5133 
5134   // Do we need to load the previous value?
5135   if (obj != noreg) {
5136     load_heap_oop(pre_val, Address(obj, 0));
5137   }
5138 
5139   // Is the previous value null?
5140   cmpptr(pre_val, (int32_t) NULL_WORD);
5141   jcc(Assembler::equal, done);
5142 
5143   // Can we store original value in the thread's buffer?
5144   // Is index == 0?
5145   // (The index field is typed as size_t.)
5146 
5147   movptr(tmp, index);                   // tmp := *index_adr
5148   cmpptr(tmp, 0);                       // tmp == 0?
5149   jcc(Assembler::equal, runtime);       // If yes, goto runtime
5150 
5151   subptr(tmp, wordSize);                // tmp := tmp - wordSize
5152   movptr(index, tmp);                   // *index_adr := tmp
5153   addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr
5154 
5155   // Record the previous value
5156   movptr(Address(tmp, 0), pre_val);
5157   jmp(done);
5158 
5159   bind(runtime);
5160   // save the live input values
  if (tosca_live) push(rax);
5162 
5163   if (obj != noreg && obj != rax)
5164     push(obj);
5165 
5166   if (pre_val != rax)
5167     push(pre_val);
5168 
5169   // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
5171   // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
5172   //
  // If we are generating the pre-barrier without a frame (e.g. in the
5174   // intrinsified Reference.get() routine) then ebp might be pointing to
5175   // the caller frame and so this check will most likely fail at runtime.
5176   //
5177   // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed as true.
5180 
5181   NOT_LP64( push(thread); )
5182 
5183   if (expand_call) {
5184     LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
5185     pass_arg1(this, thread);
5186     pass_arg0(this, pre_val);
5187     MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
5188   } else {
5189     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
5190   }
5191 
5192   NOT_LP64( pop(thread); )
5193 
5194   // save the live input values
5195   if (pre_val != rax)
5196     pop(pre_val);
5197 
5198   if (obj != noreg && obj != rax)
5199     pop(obj);
5200 
  if (tosca_live) pop(rax);
5202 
5203   bind(done);
5204 }
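
// The SATB fast path above is, in effect (pseudo-C over the queue fields):
//
//   if (!queue->active)  return;          // marking not in progress
//   if (pre_val == NULL) return;
//   if (queue->index == 0) goto runtime;  // thread-local buffer is full
//   queue->index -= wordSize;
//   *(oop*)((char*)queue->buf + queue->index) = pre_val;
//
// Note that index is a byte offset counting *down* from the buffer capacity,
// so zero means "full", and the runtime call hands the buffer to the
// collector before logging the value.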
5205 
5206 void MacroAssembler::g1_write_barrier_post(Register store_addr,
5207                                            Register new_val,
5208                                            Register thread,
5209                                            Register tmp,
5210                                            Register tmp2) {
5211 #ifdef _LP64
5212   assert(thread == r15_thread, "must be");
5213 #endif // _LP64
5214 
5215   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
5216                                        DirtyCardQueue::byte_offset_of_index()));
5217   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
5218                                        DirtyCardQueue::byte_offset_of_buf()));
5219 
5220   CardTableModRefBS* ct =
5221     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
5222   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
5223 
5224   Label done;
5225   Label runtime;
5226 
5227   // Does store cross heap regions?
5228 
5229   movptr(tmp, store_addr);
5230   xorptr(tmp, new_val);
5231   shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
5232   jcc(Assembler::equal, done);
5233 
5234   // crosses regions, storing NULL?
5235 
5236   cmpptr(new_val, (int32_t) NULL_WORD);
5237   jcc(Assembler::equal, done);
5238 
5239   // storing region crossing non-NULL, is card already dirty?
5240 
5241   const Register card_addr = tmp;
5242   const Register cardtable = tmp2;
5243 
5244   movptr(card_addr, store_addr);
5245   shrptr(card_addr, CardTableModRefBS::card_shift);
5246   // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
5247   // a valid address and therefore is not properly handled by the relocation code.
5248   movptr(cardtable, (intptr_t)ct->byte_map_base);
5249   addptr(card_addr, cardtable);
5250 
5251   cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
5252   jcc(Assembler::equal, done);
5253 
5254   membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
5255   cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
5256   jcc(Assembler::equal, done);
5257 
5258 
5259   // storing a region crossing, non-NULL oop, card is clean.
5260   // dirty card and log.
5261 
5262   movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
5263 
5264   cmpl(queue_index, 0);
5265   jcc(Assembler::equal, runtime);
5266   subl(queue_index, wordSize);
5267   movptr(tmp2, buffer);
5268 #ifdef _LP64
5269   movslq(rscratch1, queue_index);
5270   addq(tmp2, rscratch1);
5271   movq(Address(tmp2, 0), card_addr);
5272 #else
5273   addl(tmp2, queue_index);
5274   movl(Address(tmp2, 0), card_addr);
5275 #endif
5276   jmp(done);
5277 
5278   bind(runtime);
5279   // save the live input values
5280   push(store_addr);
5281   push(new_val);
5282 #ifdef _LP64
5283   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
5284 #else
5285   push(thread);
5286   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
5287   pop(thread);
5288 #endif
5289   pop(new_val);
5290   pop(store_addr);
5291 
5292   bind(done);
5293 }
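
// Card addressing above, as a worked example: cards are 512 bytes here
// (card_shift == 9), so a store to heap address A dirties the card byte at
//
//   byte_map_base + (A >> 9)
//
// byte_map_base is pre-biased by the heap's low bound and is generally not a
// valid pointer by itself, which is why it is loaded as a raw constant
// instead of through an ExternalAddress relocation.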
5294 
5295 #endif // INCLUDE_ALL_GCS
5296 //////////////////////////////////////////////////////////////////////////////////
5297 
5298 
5299 void MacroAssembler::store_check(Register obj, Address dst) {
5300   store_check(obj);
5301 }
5302 
5303 void MacroAssembler::store_check(Register obj) {
5304   // Does a store check for the oop in register obj. The content of
5305   // register obj is destroyed afterwards.
5306   BarrierSet* bs = Universe::heap()->barrier_set();
5307   assert(bs->kind() == BarrierSet::CardTableForRS ||
5308          bs->kind() == BarrierSet::CardTableExtension,
5309          "Wrong barrier set kind");
5310 
5311   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
5312   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
5313 
5314   shrptr(obj, CardTableModRefBS::card_shift);
5315 
5316   Address card_addr;
5317 
5318   // The calculation for byte_map_base is as follows:
5319   // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
5320   // So this essentially converts an address to a displacement and it will
  // never need to be relocated. On 64-bit, however, the value may be too
  // large for a 32-bit displacement.
5323   intptr_t disp = (intptr_t) ct->byte_map_base;
5324   if (is_simm32(disp)) {
5325     card_addr = Address(noreg, obj, Address::times_1, disp);
5326   } else {
5327     // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
5328     // displacement and done in a single instruction given favorable mapping and a
5329     // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
5330     // entry and that entry is not properly handled by the relocation code.
5331     AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
5332     Address index(noreg, obj, Address::times_1);
5333     card_addr = as_Address(ArrayAddress(cardtable, index));
5334   }
5335 
5336   int dirty = CardTableModRefBS::dirty_card_val();
5337   if (UseCondCardMark) {
5338     Label L_already_dirty;
5339     if (UseConcMarkSweepGC) {
5340       membar(Assembler::StoreLoad);
5341     }
5342     cmpb(card_addr, dirty);
5343     jcc(Assembler::equal, L_already_dirty);
5344     movb(card_addr, dirty);
5345     bind(L_already_dirty);
5346   } else {
5347     movb(card_addr, dirty);
5348   }
5349 }
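
// Design note on UseCondCardMark above: unconditionally storing the dirty
// value can generate heavy coherence traffic when many threads hit the same
// hot card, so the conditional form first loads the card and skips the store
// if it is already dirty. The StoreLoad fence in the CMS case keeps the card
// load from being reordered ahead of the preceding reference store.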
5350 
5351 void MacroAssembler::subptr(Register dst, int32_t imm32) {
5352   LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
5353 }
5354 
5355 // Force generation of a 4 byte immediate value even if it fits into 8bit
5356 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
5357   LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
5358 }
5359 
5360 void MacroAssembler::subptr(Register dst, Register src) {
5361   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
5362 }
5363 
5364 // C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}
5377 
5378 void MacroAssembler::testptr(Register dst, Register src) {
5379   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
5380 }
5381 
5382 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5383 void MacroAssembler::tlab_allocate(Register obj,
5384                                    Register var_size_in_bytes,
5385                                    int con_size_in_bytes,
5386                                    Register t1,
5387                                    Register t2,
5388                                    Label& slow_case) {
5389   assert_different_registers(obj, t1, t2);
5390   assert_different_registers(obj, var_size_in_bytes, t1);
5391   Register end = t2;
5392   Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
5393 
5394   verify_tlab();
5395 
5396   NOT_LP64(get_thread(thread));
5397 
5398   movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
5399   if (var_size_in_bytes == noreg) {
5400     lea(end, Address(obj, con_size_in_bytes));
5401   } else {
5402     lea(end, Address(obj, var_size_in_bytes, Address::times_1));
5403   }
5404   cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
5405   jcc(Assembler::above, slow_case);
5406 
5407   // update the tlab top pointer
5408   movptr(Address(thread, JavaThread::tlab_top_offset()), end);
5409 
5410   // recover var_size_in_bytes if necessary
5411   if (var_size_in_bytes == end) {
5412     subptr(var_size_in_bytes, obj);
5413   }
5414   verify_tlab();
5415 }
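
// tlab_allocate is plain bump-the-pointer allocation; in pseudo-C:
//
//   obj = thread->tlab_top;
//   end = obj + size;                        // constant or register size
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;
//
// No atomics are required because a TLAB is private to its thread; only the
// refill path below has to coordinate with the shared heap.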
5416 
5417 // Preserves rbx, and rdx.
5418 Register MacroAssembler::tlab_refill(Label& retry,
5419                                      Label& try_eden,
5420                                      Label& slow_case) {
5421   Register top = rax;
5422   Register t1  = rcx; // object size
5423   Register t2  = rsi;
5424   Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
5425   assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
5426   Label do_refill, discard_tlab;
5427 
5428   if (!Universe::heap()->supports_inline_contig_alloc()) {
5429     // No allocation in the shared eden.
5430     jmp(slow_case);
5431   }
5432 
5433   NOT_LP64(get_thread(thread_reg));
5434 
5435   movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5436   movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
5437 
5438   // calculate amount of free space
5439   subptr(t1, top);
5440   shrptr(t1, LogHeapWordSize);
5441 
5442   // Retain tlab and allocate object in shared space if
5443   // the amount free in the tlab is too large to discard.
5444   cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
5445   jcc(Assembler::lessEqual, discard_tlab);
5446 
5447   // Retain
5448   // %%% yuck as movptr...
5449   movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
5450   addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
5451   if (TLABStats) {
5452     // increment number of slow_allocations
5453     addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
5454   }
5455   jmp(try_eden);
5456 
5457   bind(discard_tlab);
5458   if (TLABStats) {
5459     // increment number of refills
5460     addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
5461     // accumulate wastage -- t1 is amount free in tlab
5462     addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
5463   }
5464 
5465   // if tlab is currently allocated (top or end != null) then
5466   // fill [top, end + alignment_reserve) with array object
5467   testptr(top, top);
5468   jcc(Assembler::zero, do_refill);
5469 
5470   // set up the mark word
5471   movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
5472   // set the length to the remaining space
5473   subptr(t1, typeArrayOopDesc::header_size(T_INT));
5474   addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
5475   shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
5476   movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
5477   // set klass to intArrayKlass
5478   // dubious reloc why not an oop reloc?
5479   movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
  // store klass last; concurrent GCs assume the length is valid if
  // the klass field is not null.
5482   store_klass(top, t1);
5483 
5484   movptr(t1, top);
5485   subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5486   incr_allocated_bytes(thread_reg, t1, 0);
5487 
5488   // refill the tlab with an eden allocation
5489   bind(do_refill);
5490   movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
5491   shlptr(t1, LogHeapWordSize);
5492   // allocate new tlab, address returned in top
5493   eden_allocate(top, t1, 0, t2, slow_case);
5494 
5495   // Check that t1 was preserved in eden_allocate.
5496 #ifdef ASSERT
5497   if (UseTLAB) {
5498     Label ok;
5499     Register tsize = rsi;
5500     assert_different_registers(tsize, thread_reg, t1);
5501     push(tsize);
5502     movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
5503     shlptr(tsize, LogHeapWordSize);
5504     cmpptr(t1, tsize);
5505     jcc(Assembler::equal, ok);
5506     STOP("assert(t1 != tlab size)");
5507     should_not_reach_here();
5508 
5509     bind(ok);
5510     pop(tsize);
5511   }
5512 #endif
5513   movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
5514   movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
5515   addptr(top, t1);
5516   subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
5517   movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
5518 
5519   if (ZeroTLAB) {
5520     // This is a fast TLAB refill, therefore the GC is not notified of it.
5521     // So compiled code must fill the new TLAB with zeroes.
5522     movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5523     zero_memory(top, t1, 0, t2);
5524   }
5525 
5526   verify_tlab();
5527   jmp(retry);
5528 
5529   return thread_reg; // for use by caller
5530 }
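
// Why the discarded TLAB tail is formatted as an int[] above: the collector
// must be able to walk the heap linearly, so every gap has to parse as a
// real object. The length stored is the free space (plus the alignment
// reserve, minus the typeArray header) converted from heap words to jint
// elements -- that conversion is the shlptr by
// log2_intptr(HeapWordSize/sizeof(jint)).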
5531 
5532 // Preserves the contents of address, destroys the contents length_in_bytes and temp.
5533 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
5534   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
5535   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
5536   Label done;
5537 
5538   testptr(length_in_bytes, length_in_bytes);
5539   jcc(Assembler::zero, done);
5540 
5541   // initialize topmost word, divide index by 2, check if odd and test if zero
5542   // note: for the remaining code to work, index must be a multiple of BytesPerWord
5543 #ifdef ASSERT
5544   {
5545     Label L;
5546     testptr(length_in_bytes, BytesPerWord - 1);
5547     jcc(Assembler::zero, L);
5548     stop("length must be a multiple of BytesPerWord");
5549     bind(L);
5550   }
5551 #endif
5552   Register index = length_in_bytes;
5553   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
5554   if (UseIncDec) {
    shrptr(index, 3);  // divide by 8 and set carry flag if bit 2 was set
5556   } else {
5557     shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
5558     shrptr(index, 1);
5559   }
5560 #ifndef _LP64
  // index might not have been a multiple of 8 (i.e., bit 2 was set)
5562   {
5563     Label even;
5564     // note: if index was a multiple of 8, then it cannot
5565     //       be 0 now otherwise it must have been 0 before
5566     //       => if it is even, we don't need to check for 0 again
5567     jcc(Assembler::carryClear, even);
5568     // clear topmost word (no jump would be needed if conditional assignment worked here)
5569     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
5570     // index could be 0 now, must check again
5571     jcc(Assembler::zero, done);
5572     bind(even);
5573   }
5574 #endif // !_LP64
5575   // initialize remaining object fields: index is a multiple of 2 now
5576   {
5577     Label loop;
5578     bind(loop);
5579     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
5580     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
5581     decrement(index);
5582     jcc(Assembler::notZero, loop);
5583   }
5584 
5585   bind(done);
5586 }
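
// The zeroing loop above works from the top of the block downwards in
// 8-byte strides; in pseudo-C for the 64-bit case:
//
//   index = length_in_bytes >> 3;        // the shrptr above
//   do {
//     *(intptr_t*)(address + index*8 + offset_in_bytes - 8) = 0;
//   } while (--index != 0);
//
// On 32-bit, each iteration clears two 4-byte words, and the carry-flag test
// above first disposes of a possible odd leftover word.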
5587 
5588 void MacroAssembler::incr_allocated_bytes(Register thread,
5589                                           Register var_size_in_bytes,
5590                                           int con_size_in_bytes,
5591                                           Register t1) {
5592   if (!thread->is_valid()) {
5593 #ifdef _LP64
5594     thread = r15_thread;
5595 #else
5596     assert(t1->is_valid(), "need temp reg");
5597     thread = t1;
5598     get_thread(thread);
5599 #endif
5600   }
5601 
5602 #ifdef _LP64
5603   if (var_size_in_bytes->is_valid()) {
5604     addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
5605   } else {
5606     addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
5607   }
5608 #else
5609   if (var_size_in_bytes->is_valid()) {
5610     addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
5611   } else {
5612     addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
5613   }
5614   adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
5615 #endif
5616 }
5617 
5618 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
5619   pusha();
5620 
5621   // if we are coming from c1, xmm registers may be live
5622   int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
5623   if (UseAVX > 2) {
5624     num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
5625   }
5626 
5627   if (UseSSE == 1)  {
5628     subptr(rsp, sizeof(jdouble)*8);
5629     for (int n = 0; n < 8; n++) {
5630       movflt(Address(rsp, n*sizeof(jdouble)), as_XMMRegister(n));
5631     }
5632   } else if (UseSSE >= 2)  {
5633     if (UseAVX > 2) {
5634       push(rbx);
5635       movl(rbx, 0xffff);
5636       kmovwl(k1, rbx);
5637       pop(rbx);
5638     }
5639 #ifdef COMPILER2
5640     if (MaxVectorSize > 16) {
      if (UseAVX > 2) {
5642         // Save upper half of ZMM registers
5643         subptr(rsp, 32*num_xmm_regs);
5644         for (int n = 0; n < num_xmm_regs; n++) {
5645           vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
5646         }
5647       }
5648       assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
5649       // Save upper half of YMM registers
5650       subptr(rsp, 16*num_xmm_regs);
5651       for (int n = 0; n < num_xmm_regs; n++) {
5652         vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
5653       }
5654     }
5655 #endif
5656     // Save whole 128bit (16 bytes) XMM registers
5657     subptr(rsp, 16*num_xmm_regs);
5658 #ifdef _LP64
5659     if (VM_Version::supports_evex()) {
5660       for (int n = 0; n < num_xmm_regs; n++) {
5661         vextractf32x4h(Address(rsp, n*16), as_XMMRegister(n), 0);
5662       }
5663     } else {
5664       for (int n = 0; n < num_xmm_regs; n++) {
5665         movdqu(Address(rsp, n*16), as_XMMRegister(n));
5666       }
5667     }
5668 #else
5669     for (int n = 0; n < num_xmm_regs; n++) {
5670       movdqu(Address(rsp, n*16), as_XMMRegister(n));
5671     }
5672 #endif
5673   }
5674 
5675   // Preserve registers across runtime call
5676   int incoming_argument_and_return_value_offset = -1;
5677   if (num_fpu_regs_in_use > 1) {
5678     // Must preserve all other FPU regs (could alternatively convert
5679     // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
    // FPU state, but we cannot trust the C compiler)
5681     NEEDS_CLEANUP;
5682     // NOTE that in this case we also push the incoming argument(s) to
5683     // the stack and restore it later; we also use this stack slot to
5684     // hold the return value from dsin, dcos etc.
5685     for (int i = 0; i < num_fpu_regs_in_use; i++) {
5686       subptr(rsp, sizeof(jdouble));
5687       fstp_d(Address(rsp, 0));
5688     }
5689     incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
5690     for (int i = nb_args-1; i >= 0; i--) {
5691       fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
5692     }
5693   }
5694 
5695   subptr(rsp, nb_args*sizeof(jdouble));
5696   for (int i = 0; i < nb_args; i++) {
5697     fstp_d(Address(rsp, i*sizeof(jdouble)));
5698   }
5699 
5700 #ifdef _LP64
5701   if (nb_args > 0) {
5702     movdbl(xmm0, Address(rsp, 0));
5703   }
5704   if (nb_args > 1) {
5705     movdbl(xmm1, Address(rsp, sizeof(jdouble)));
5706   }
5707   assert(nb_args <= 2, "unsupported number of args");
5708 #endif // _LP64
5709 
5710   // NOTE: we must not use call_VM_leaf here because that requires a
5711   // complete interpreter frame in debug mode -- same bug as 4387334;
5712   // MacroAssembler::call_VM_leaf_base is perfectly safe and will
5713   // follow the proper 64-bit ABI.
5714 
5715   NEEDS_CLEANUP;
5716   // Stack banging needs to be added before this runtime call if the bang
5717   // is required; however, there is no generic stack banging routine at
5718   // the MacroAssembler level.
5719 
5720   MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
5721 
5722 #ifdef _LP64
5723   movsd(Address(rsp, 0), xmm0);
5724   fld_d(Address(rsp, 0));
5725 #endif // _LP64
5726   addptr(rsp, sizeof(jdouble)*nb_args);
5727   if (num_fpu_regs_in_use > 1) {
5728     // Must save the return value to the stack and then restore the
5729     // entire FPU stack except the incoming arguments
5730     fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
5731     for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
5732       fld_d(Address(rsp, 0));
5733       addptr(rsp, sizeof(jdouble));
5734     }
5735     fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
5736     addptr(rsp, sizeof(jdouble)*nb_args);
5737   }
5738 
5739   if (UseSSE == 1)  {
5740     for (int n = 0; n < 8; n++) {
5741       movflt(as_XMMRegister(n), Address(rsp, n*sizeof(jdouble)));
5742     }
5743     addptr(rsp, sizeof(jdouble)*8);
5744   } else if (UseSSE >= 2)  {
5745     // Restore the whole 128-bit (16-byte) XMM registers
5746 #ifdef _LP64
5747     if (VM_Version::supports_evex()) {
5748       for (int n = 0; n < num_xmm_regs; n++) {
5749         vinsertf32x4h(as_XMMRegister(n), Address(rsp, n*16), 0);
5750       }
5751     } else {
5752       for (int n = 0; n < num_xmm_regs; n++) {
5753         movdqu(as_XMMRegister(n), Address(rsp, n*16));
5754       }
5755     }
5756 #else
5757     for (int n = 0; n < num_xmm_regs; n++) {
5758       movdqu(as_XMMRegister(n), Address(rsp, n*16));
5759     }
5760 #endif
5761     addptr(rsp, 16*num_xmm_regs);
5762 
5763 #ifdef COMPILER2
5764     if (MaxVectorSize > 16) {
5765       // Restore upper half of YMM registers.
5766       for (int n = 0; n < num_xmm_regs; n++) {
5767         vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
5768       }
5769       addptr(rsp, 16*num_xmm_regs);
5770       if (UseAVX > 2) {
5771         for (int n = 0; n < num_xmm_regs; n++) {
5772           vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
5773         }
5774         addptr(rsp, 32*num_xmm_regs);
5775       }
5776     }
5777 #endif
5778   }
5779   popa();
5780 }
5781 
5782 static const double     pi_4 =  0.7853981633974483;
5783 
5784 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
5785   // A hand-coded argument reduction for values with fabs(x) in
5786   // (pi/4, pi/2) was attempted in this code; unfortunately it appears
5787   // that the switch to 80-bit precision and back causes this to be
5788   // unprofitable compared with simply performing a runtime call if
5789   // the argument is outside the (-pi/4, pi/4) range.
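       // Informally: within (-pi/4, pi/4) the x87 fsin/fcos/ftan instructions
       // below are used directly with no argument reduction (the |x| <= pi/4
       // check); anything larger goes to the SharedRuntime C routines.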
5790 
5791   Register tmp = noreg;
5792   if (!VM_Version::supports_cmov()) {
5793     // fcmp needs a temporary, so preserve rbx
5794     tmp = rbx;
5795     push(tmp);
5796   }
5797 
5798   Label slow_case, done;
5799 
5800   ExternalAddress pi4_adr = (address)&pi_4;
5801   if (reachable(pi4_adr)) {
5802     // x ?<= pi/4
5803     fld_d(pi4_adr);
5804     fld_s(1);                // Stack:  X  PI/4  X
5805     fabs();                  // Stack: |X| PI/4  X
5806     fcmp(tmp);
5807     jcc(Assembler::above, slow_case);
5808 
5809     // fastest case: -pi/4 <= x <= pi/4
5810     switch(trig) {
5811     case 's':
5812       fsin();
5813       break;
5814     case 'c':
5815       fcos();
5816       break;
5817     case 't':
5818       ftan();
5819       break;
5820     default:
5821       assert(false, "bad intrinsic");
5822       break;
5823     }
5824     jmp(done);
5825   }
5826 
5827   // slow case: runtime call
5828   bind(slow_case);
5829 
5830   switch(trig) {
5831   case 's':
5832     {
5833       fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
5834     }
5835     break;
5836   case 'c':
5837     {
5838       fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
5839     }
5840     break;
5841   case 't':
5842     {
5843       fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
5844     }
5845     break;
5846   default:
5847     assert(false, "bad intrinsic");
5848     break;
5849   }
5850 
5851   // Come here with result in F-TOS
5852   bind(done);
5853 
5854   if (tmp != noreg) {
5855     pop(tmp);
5856   }
5857 }
5858 
5859 
5860 // Look up the method for a megamorphic invokeinterface call.
5861 // The target method is determined by <intf_klass, itable_index>.
5862 // The receiver klass is in recv_klass.
5863 // On success, the result will be in method_result, and execution falls through.
5864 // On failure, execution transfers to the given label.
5865 void MacroAssembler::lookup_interface_method(Register recv_klass,
5866                                              Register intf_klass,
5867                                              RegisterOrConstant itable_index,
5868                                              Register method_result,
5869                                              Register scan_temp,
5870                                              Label& L_no_such_interface) {
5871   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
5872   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
5873          "caller must use same register for non-constant itable index as for method");
5874 
5875   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
5876   int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
5877   int itentry_off = itableMethodEntry::method_offset_in_bytes();
5878   int scan_step   = itableOffsetEntry::size() * wordSize;
5879   int vte_size    = vtableEntry::size() * wordSize;
5880   Address::ScaleFactor times_vte_scale = Address::times_ptr;
5881   assert(vte_size == wordSize, "else adjust times_vte_scale");
5882 
5883   movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
5884 
5885   // %%% Could store the aligned, prescaled offset in the klass.
5886   lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
5887   if (HeapWordsPerLong > 1) {
5888     // Round up to align_object_offset boundary
5889     // see code for InstanceKlass::start_of_itable!
5890     round_to(scan_temp, BytesPerLong);
5891   }
5892 
5893   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
5894   assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
5895   lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
5896 
5897   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
5898   //   if (scan->interface() == intf) {
5899   //     result = (klass + scan->offset() + itable_index);
5900   //   }
5901   // }
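       // The scan loop below is peeled once: the peel == 1 copy handles a hit
       // on the first itable entry with a short forward branch, and the
       // peel == 0 copy forms the loop proper, with the test inverted so that
       // a hit falls through to found_method.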
5902   Label search, found_method;
5903 
5904   for (int peel = 1; peel >= 0; peel--) {
5905     movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
5906     cmpptr(intf_klass, method_result);
5907 
5908     if (peel) {
5909       jccb(Assembler::equal, found_method);
5910     } else {
5911       jccb(Assembler::notEqual, search);
5912       // (invert the test to fall through to found_method...)
5913     }
5914 
5915     if (!peel)  break;
5916 
5917     bind(search);
5918 
5919     // Check that the previous entry is non-null.  A null entry means that
5920     // the receiver class doesn't implement the interface, and wasn't the
5921     // same as when the caller was compiled.
5922     testptr(method_result, method_result);
5923     jcc(Assembler::zero, L_no_such_interface);
5924     addptr(scan_temp, scan_step);
5925   }
5926 
5927   bind(found_method);
5928 
5929   // Got a hit.
5930   movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
5931   movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
5932 }
5933 
5934 
5935 // virtual method calling
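     // Informally: method_result = recv_klass->vtable()[vtable_index].method(),
     // i.e. a single scaled-index load from the receiver's vtable.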
5936 void MacroAssembler::lookup_virtual_method(Register recv_klass,
5937                                            RegisterOrConstant vtable_index,
5938                                            Register method_result) {
5939   const int base = InstanceKlass::vtable_start_offset() * wordSize;
5940   assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
5941   Address vtable_entry_addr(recv_klass,
5942                             vtable_index, Address::times_ptr,
5943                             base + vtableEntry::method_offset_in_bytes());
5944   movptr(method_result, vtable_entry_addr);
5945 }
5946 
5947 
5948 void MacroAssembler::check_klass_subtype(Register sub_klass,
5949                            Register super_klass,
5950                            Register temp_reg,
5951                            Label& L_success) {
5952   Label L_failure;
5953   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
5954   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
5955   bind(L_failure);
5956 }
5957 
5958 
5959 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
5960                                                    Register super_klass,
5961                                                    Register temp_reg,
5962                                                    Label* L_success,
5963                                                    Label* L_failure,
5964                                                    Label* L_slow_path,
5965                                         RegisterOrConstant super_check_offset) {
5966   assert_different_registers(sub_klass, super_klass, temp_reg);
5967   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
5968   if (super_check_offset.is_register()) {
5969     assert_different_registers(sub_klass, super_klass,
5970                                super_check_offset.as_register());
5971   } else if (must_load_sco) {
5972     assert(temp_reg != noreg, "supply either a temp or a register offset");
5973   }
5974 
5975   Label L_fallthrough;
5976   int label_nulls = 0;
5977   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
5978   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
5979   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
5980   assert(label_nulls <= 1, "at most one NULL in the batch");
5981 
5982   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5983   int sco_offset = in_bytes(Klass::super_check_offset_offset());
5984   Address super_check_offset_addr(super_klass, sco_offset);
5985 
5986   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
5987   // range of a jccb.  If this routine grows larger, reconsider at
5988   // least some of these.
5989 #define local_jcc(assembler_cond, label)                                \
5990   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
5991   else                             jcc( assembler_cond, label) /*omit semi*/
5992 
5993   // Hacked jmp, which may only be used just before L_fallthrough.
5994 #define final_jmp(label)                                                \
5995   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
5996   else                            jmp(label)                /*omit semi*/
5997 
5998   // If the pointers are equal, we are done (e.g., String[] elements).
5999   // This self-check enables sharing of secondary supertype arrays among
6000   // non-primary types such as array-of-interface.  Otherwise, each such
6001   // type would need its own customized secondary supertype array.
6002   // We move this check to the front of the fast path because many
6003   // type checks are in fact trivially successful in this manner,
6004   // so we get a nicely predicted branch right at the start of the check.
6005   cmpptr(sub_klass, super_klass);
6006   local_jcc(Assembler::equal, *L_success);
6007 
6008   // Check the supertype display:
6009   if (must_load_sco) {
6010     // A positive movl does the right thing on LP64.
6011     movl(temp_reg, super_check_offset_addr);
6012     super_check_offset = RegisterOrConstant(temp_reg);
6013   }
6014   Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
6015   cmpptr(super_klass, super_check_addr); // load displayed supertype
6016 
6017   // This check has worked decisively for primary supers.
6018   // Secondary supers are sought in the super_cache ('super_cache_addr').
6019   // (Secondary supers are interfaces and very deeply nested subtypes.)
6020   // This works in the check above because of a tricky aliasing
6021   // between the super_cache and the primary super display elements.
6022   // (The 'super_check_addr' can address either, as the case requires.)
6023   // Note that the cache is updated below if it does not help us find
6024   // what we need immediately.
6025   // So if it was a primary super, we can just fail immediately.
6026   // Otherwise, it's the slow path for us (no success at this point).
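       // Informal summary of the three cases below:
       //   1. register offset: equal -> success; otherwise offset != sc_offset
       //      means failure, while offset == sc_offset means only the cache was
       //      probed, so take the slow path.
       //   2. constant offset == sc_offset: a cache miss means "not known yet",
       //      never a fast failure; take the slow path.
       //   3. constant offset != sc_offset: a primary-super display probe, so
       //      the compare decides success or failure outright.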
6027 
6028   if (super_check_offset.is_register()) {
6029     local_jcc(Assembler::equal, *L_success);
6030     cmpl(super_check_offset.as_register(), sc_offset);
6031     if (L_failure == &L_fallthrough) {
6032       local_jcc(Assembler::equal, *L_slow_path);
6033     } else {
6034       local_jcc(Assembler::notEqual, *L_failure);
6035       final_jmp(*L_slow_path);
6036     }
6037   } else if (super_check_offset.as_constant() == sc_offset) {
6038     // Need a slow path; fast failure is impossible.
6039     if (L_slow_path == &L_fallthrough) {
6040       local_jcc(Assembler::equal, *L_success);
6041     } else {
6042       local_jcc(Assembler::notEqual, *L_slow_path);
6043       final_jmp(*L_success);
6044     }
6045   } else {
6046     // No slow path; it's a fast decision.
6047     if (L_failure == &L_fallthrough) {
6048       local_jcc(Assembler::equal, *L_success);
6049     } else {
6050       local_jcc(Assembler::notEqual, *L_failure);
6051       final_jmp(*L_success);
6052     }
6053   }
6054 
6055   bind(L_fallthrough);
6056 
6057 #undef local_jcc
6058 #undef final_jmp
6059 }
6060 
6061 
6062 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
6063                                                    Register super_klass,
6064                                                    Register temp_reg,
6065                                                    Register temp2_reg,
6066                                                    Label* L_success,
6067                                                    Label* L_failure,
6068                                                    bool set_cond_codes) {
6069   assert_different_registers(sub_klass, super_klass, temp_reg);
6070   if (temp2_reg != noreg)
6071     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
6072 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
6073 
6074   Label L_fallthrough;
6075   int label_nulls = 0;
6076   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
6077   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
6078   assert(label_nulls <= 1, "at most one NULL in the batch");
6079 
6080   // a couple of useful fields in sub_klass:
6081   int ss_offset = in_bytes(Klass::secondary_supers_offset());
6082   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
6083   Address secondary_supers_addr(sub_klass, ss_offset);
6084   Address super_cache_addr(     sub_klass, sc_offset);
6085 
6086   // Do a linear scan of the secondary super-klass chain.
6087   // This code is rarely used, so simplicity is a virtue here.
6088   // The repne_scan instruction uses fixed registers, which we must spill.
6089   // Don't worry too much about pre-existing connections with the input regs.
6090 
6091   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
6092   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
6093 
6094   // Get super_klass value into rax (even if it was in rdi or rcx).
6095   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
6096   if (super_klass != rax || UseCompressedOops) {
6097     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
6098     mov(rax, super_klass);
6099   }
6100   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
6101   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
6102 
6103 #ifndef PRODUCT
6104   int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
6105   ExternalAddress pst_counter_addr((address) pst_counter);
6106   NOT_LP64(  incrementl(pst_counter_addr) );
6107   LP64_ONLY( lea(rcx, pst_counter_addr) );
6108   LP64_ONLY( incrementl(Address(rcx, 0)) );
6109 #endif //PRODUCT
6110 
6111   // We will consult the secondary-super array.
6112   movptr(rdi, secondary_supers_addr);
6113   // Load the array length.  (A positive movl does the right thing on LP64.)
6114   movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
6115   // Skip to start of data.
6116   addptr(rdi, Array<Klass*>::base_offset_in_bytes());
6117 
6118   // Scan RCX words at [RDI] for an occurrence of RAX.
6119   // Set NZ/Z based on last compare.
6120   // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
6121   // does not change flags; only the repeated scas instruction sets them.
6122   // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
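       // Roughly equivalent C sketch of repne_scan (not normative):
       //   while (rcx != 0) { rcx--; ZF = (rax == *rdi); rdi += wordSize; if (ZF) break; }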
6123 
6124   testptr(rax, rax); // Set Z = 0
6125   repne_scan();
6126 
6127   // Unspill the temp. registers:
6128   if (pushed_rdi)  pop(rdi);
6129   if (pushed_rcx)  pop(rcx);
6130   if (pushed_rax)  pop(rax);
6131 
6132   if (set_cond_codes) {
6133     // Special hack for the AD files:  rdi is guaranteed non-zero.
6134     assert(!pushed_rdi, "rdi must be left non-NULL");
6135     // Also, the condition codes are properly set Z/NZ on success/failure.
6136   }
6137 
6138   if (L_failure == &L_fallthrough) {
6139     jccb(Assembler::notEqual, *L_failure);
6140   } else { jcc(Assembler::notEqual, *L_failure); }
6141 
6142   // Success.  Cache the super we found and proceed in triumph.
6143   movptr(super_cache_addr, super_klass);
6144 
6145   if (L_success != &L_fallthrough) {
6146     jmp(*L_success);
6147   }
6148 
6149 #undef IS_A_TEMP
6150 
6151   bind(L_fallthrough);
6152 }
6153 
6154 
6155 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
6156   if (VM_Version::supports_cmov()) {
6157     cmovl(cc, dst, src);
6158   } else {
6159     Label L;
6160     jccb(negate_condition(cc), L);
6161     movl(dst, src);
6162     bind(L);
6163   }
6164 }
6165 
6166 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
6167   if (VM_Version::supports_cmov()) {
6168     cmovl(cc, dst, src);
6169   } else {
6170     Label L;
6171     jccb(negate_condition(cc), L);
6172     movl(dst, src);
6173     bind(L);
6174   }
6175 }
6176 
6177 void MacroAssembler::verify_oop(Register reg, const char* s) {
6178   if (!VerifyOops) return;
6179 
6180   // Pass register number to verify_oop_subroutine
6181   const char* b = NULL;
6182   {
6183     ResourceMark rm;
6184     stringStream ss;
6185     ss.print("verify_oop: %s: %s", reg->name(), s);
6186     b = code_string(ss.as_string());
6187   }
6188   BLOCK_COMMENT("verify_oop {");
6189 #ifdef _LP64
6190   push(rscratch1);                    // save r10, trashed by movptr()
6191 #endif
6192   push(rax);                          // save rax
6193   push(reg);                          // pass register argument
6194   ExternalAddress buffer((address) b);
6195   // avoid using pushptr, as it modifies scratch registers
6196   // and our contract is not to modify anything
6197   movptr(rax, buffer.addr());
6198   push(rax);
6199   // call indirectly to solve generation ordering problem
6200   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
6201   call(rax);
6202   // Caller pops the arguments (oop, message) and restores rax, r10
6203   BLOCK_COMMENT("} verify_oop");
6204 }
6205 
6206 
6207 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
6208                                                       Register tmp,
6209                                                       int offset) {
6210   intptr_t value = *delayed_value_addr;
6211   if (value != 0)
6212     return RegisterOrConstant(value + offset);
6213 
6214   // load indirectly to solve generation ordering problem
6215   movptr(tmp, ExternalAddress((address) delayed_value_addr));
6216 
6217 #ifdef ASSERT
6218   { Label L;
6219     testptr(tmp, tmp);
6220     if (WizardMode) {
6221       const char* buf = NULL;
6222       {
6223         ResourceMark rm;
6224         stringStream ss;
6225         ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]);
6226         buf = code_string(ss.as_string());
6227       }
6228       jcc(Assembler::notZero, L);
6229       STOP(buf);
6230     } else {
6231       jccb(Assembler::notZero, L);
6232       hlt();
6233     }
6234     bind(L);
6235   }
6236 #endif
6237 
6238   if (offset != 0)
6239     addptr(tmp, offset);
6240 
6241   return RegisterOrConstant(tmp);
6242 }
6243 
6244 
6245 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
6246                                          int extra_slot_offset) {
6247   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
6248   int stackElementSize = Interpreter::stackElementSize;
6249   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
6250 #ifdef ASSERT
6251   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
6252   assert(offset1 - offset == stackElementSize, "correct arithmetic");
6253 #endif
6254   Register             scale_reg    = noreg;
6255   Address::ScaleFactor scale_factor = Address::no_scale;
6256   if (arg_slot.is_constant()) {
6257     offset += arg_slot.as_constant() * stackElementSize;
6258   } else {
6259     scale_reg    = arg_slot.as_register();
6260     scale_factor = Address::times(stackElementSize);
6261   }
6262   offset += wordSize;           // return PC is on stack
6263   return Address(rsp, scale_reg, scale_factor, offset);
6264 }
6265 
6266 
6267 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
6268   if (!VerifyOops) return;
6269 
6270   // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
6271   // Pass register number to verify_oop_subroutine
6272   const char* b = NULL;
6273   {
6274     ResourceMark rm;
6275     stringStream ss;
6276     ss.print("verify_oop_addr: %s", s);
6277     b = code_string(ss.as_string());
6278   }
6279 #ifdef _LP64
6280   push(rscratch1);                    // save r10, trashed by movptr()
6281 #endif
6282   push(rax);                          // save rax
6283   // addr may contain rsp, so we will have to adjust it based on the push
6284   // we just did (and on 64-bit we do two pushes).
6285   // NOTE: the 64-bit code seemed to have had a bug in that it did
6286   // movq(addr, rax), which stores rax into addr; the reverse of what was intended.
6287   if (addr.uses(rsp)) {
6288     lea(rax, addr);
6289     pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
6290   } else {
6291     pushptr(addr);
6292   }
6293 
6294   ExternalAddress buffer((address) b);
6295   // pass msg argument
6296   // avoid using pushptr, as it modifies scratch registers
6297   // and our contract is not to modify anything
6298   movptr(rax, buffer.addr());
6299   push(rax);
6300 
6301   // call indirectly to solve generation ordering problem
6302   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
6303   call(rax);
6304   // Caller pops the arguments (addr, message) and restores rax, r10.
6305 }
6306 
6307 void MacroAssembler::verify_tlab() {
6308 #ifdef ASSERT
6309   if (UseTLAB && VerifyOops) {
6310     Label next, ok;
6311     Register t1 = rsi;
6312     Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
6313 
6314     push(t1);
6315     NOT_LP64(push(thread_reg));
6316     NOT_LP64(get_thread(thread_reg));
6317 
6318     movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
6319     cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
6320     jcc(Assembler::aboveEqual, next);
6321     STOP("assert(top >= start)");
6322     should_not_reach_here();
6323 
6324     bind(next);
6325     movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
6326     cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
6327     jcc(Assembler::aboveEqual, ok);
6328     STOP("assert(top <= end)");
6329     should_not_reach_here();
6330 
6331     bind(ok);
6332     NOT_LP64(pop(thread_reg));
6333     pop(t1);
6334   }
6335 #endif
6336 }
6337 
6338 class ControlWord {
6339  public:
6340   int32_t _value;
6341 
6342   int  rounding_control() const        { return  (_value >> 10) & 3      ; }
6343   int  precision_control() const       { return  (_value >>  8) & 3      ; }
6344   bool precision() const               { return ((_value >>  5) & 1) != 0; }
6345   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
6346   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
6347   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
6348   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
6349   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
6350 
6351   void print() const {
6352     // rounding control
6353     const char* rc;
6354     switch (rounding_control()) {
6355       case 0: rc = "round near"; break;
6356       case 1: rc = "round down"; break;
6357       case 2: rc = "round up  "; break;
6358       case 3: rc = "chop      "; break;
6359     }
6360     // precision control
6361     const char* pc;
6362     switch (precision_control()) {
6363       case 0: pc = "24 bits "; break;
6364       case 1: pc = "reserved"; break;
6365       case 2: pc = "53 bits "; break;
6366       case 3: pc = "64 bits "; break;
6367     }
6368     // flags
6369     char f[9];
6370     f[0] = ' ';
6371     f[1] = ' ';
6372     f[2] = (precision   ()) ? 'P' : 'p';
6373     f[3] = (underflow   ()) ? 'U' : 'u';
6374     f[4] = (overflow    ()) ? 'O' : 'o';
6375     f[5] = (zero_divide ()) ? 'Z' : 'z';
6376     f[6] = (denormalized()) ? 'D' : 'd';
6377     f[7] = (invalid     ()) ? 'I' : 'i';
6378     f[8] = '\x0';
6379     // output
6380     printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
6381   }
6382 
6383 };
6384 
6385 class StatusWord {
6386  public:
6387   int32_t _value;
6388 
6389   bool busy() const                    { return ((_value >> 15) & 1) != 0; }
6390   bool C3() const                      { return ((_value >> 14) & 1) != 0; }
6391   bool C2() const                      { return ((_value >> 10) & 1) != 0; }
6392   bool C1() const                      { return ((_value >>  9) & 1) != 0; }
6393   bool C0() const                      { return ((_value >>  8) & 1) != 0; }
6394   int  top() const                     { return  (_value >> 11) & 7      ; }
6395   bool error_status() const            { return ((_value >>  7) & 1) != 0; }
6396   bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
6397   bool precision() const               { return ((_value >>  5) & 1) != 0; }
6398   bool underflow() const               { return ((_value >>  4) & 1) != 0; }
6399   bool overflow() const                { return ((_value >>  3) & 1) != 0; }
6400   bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
6401   bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
6402   bool invalid() const                 { return ((_value >>  0) & 1) != 0; }
6403 
6404   void print() const {
6405     // condition codes
6406     char c[5];
6407     c[0] = (C3()) ? '3' : '-';
6408     c[1] = (C2()) ? '2' : '-';
6409     c[2] = (C1()) ? '1' : '-';
6410     c[3] = (C0()) ? '0' : '-';
6411     c[4] = '\x0';
6412     // flags
6413     char f[9];
6414     f[0] = (error_status()) ? 'E' : '-';
6415     f[1] = (stack_fault ()) ? 'S' : '-';
6416     f[2] = (precision   ()) ? 'P' : '-';
6417     f[3] = (underflow   ()) ? 'U' : '-';
6418     f[4] = (overflow    ()) ? 'O' : '-';
6419     f[5] = (zero_divide ()) ? 'Z' : '-';
6420     f[6] = (denormalized()) ? 'D' : '-';
6421     f[7] = (invalid     ()) ? 'I' : '-';
6422     f[8] = '\x0';
6423     // output
6424     printf("%04x  flags = %s, cc =  %s, top = %d", _value & 0xFFFF, f, c, top());
6425   }
6426 
6427 };
6428 
6429 class TagWord {
6430  public:
6431   int32_t _value;
6432 
6433   int tag_at(int i) const              { return (_value >> (i*2)) & 3; }
6434 
6435   void print() const {
6436     printf("%04x", _value & 0xFFFF);
6437   }
6438 
6439 };
6440 
6441 class FPU_Register {
6442  public:
6443   int32_t _m0;
6444   int32_t _m1;
6445   int16_t _ex;
6446 
6447   bool is_indefinite() const           {
6448     return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
6449   }
6450 
6451   void print() const {
6452     char  sign = (_ex < 0) ? '-' : '+';
6453     const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
6454     printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
6455   }
6456 
6457 };
6458 
6459 class FPU_State {
6460  public:
6461   enum {
6462     register_size       = 10,
6463     number_of_registers =  8,
6464     register_mask       =  7
6465   };
6466 
6467   ControlWord  _control_word;
6468   StatusWord   _status_word;
6469   TagWord      _tag_word;
6470   int32_t      _error_offset;
6471   int32_t      _error_selector;
6472   int32_t      _data_offset;
6473   int32_t      _data_selector;
6474   int8_t       _register[register_size * number_of_registers];
6475 
6476   int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
6477   FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }
6478 
6479   const char* tag_as_string(int tag) const {
6480     switch (tag) {
6481       case 0: return "valid";
6482       case 1: return "zero";
6483       case 2: return "special";
6484       case 3: return "empty";
6485     }
6486     ShouldNotReachHere();
6487     return NULL;
6488   }
6489 
6490   void print() const {
6491     // print computation registers
6492     { int t = _status_word.top();
6493       for (int i = 0; i < number_of_registers; i++) {
6494         int j = (i - t) & register_mask;
6495         printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
6496         st(j)->print();
6497         printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
6498       }
6499     }
6500     printf("\n");
6501     // print control registers
6502     printf("ctrl = "); _control_word.print(); printf("\n");
6503     printf("stat = "); _status_word .print(); printf("\n");
6504     printf("tags = "); _tag_word    .print(); printf("\n");
6505   }
6506 
6507 };
6508 
6509 class Flag_Register {
6510  public:
6511   int32_t _value;
6512 
6513   bool overflow() const                { return ((_value >> 11) & 1) != 0; }
6514   bool direction() const               { return ((_value >> 10) & 1) != 0; }
6515   bool sign() const                    { return ((_value >>  7) & 1) != 0; }
6516   bool zero() const                    { return ((_value >>  6) & 1) != 0; }
6517   bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
6518   bool parity() const                  { return ((_value >>  2) & 1) != 0; }
6519   bool carry() const                   { return ((_value >>  0) & 1) != 0; }
6520 
6521   void print() const {
6522     // flags
6523     char f[8];
6524     f[0] = (overflow       ()) ? 'O' : '-';
6525     f[1] = (direction      ()) ? 'D' : '-';
6526     f[2] = (sign           ()) ? 'S' : '-';
6527     f[3] = (zero           ()) ? 'Z' : '-';
6528     f[4] = (auxiliary_carry()) ? 'A' : '-';
6529     f[5] = (parity         ()) ? 'P' : '-';
6530     f[6] = (carry          ()) ? 'C' : '-';
6531     f[7] = '\x0';
6532     // output
6533     printf("%08x  flags = %s", _value, f);
6534   }
6535 
6536 };
6537 
6538 class IU_Register {
6539  public:
6540   int32_t _value;
6541 
6542   void print() const {
6543     printf("%08x  %11d", _value, _value);
6544   }
6545 
6546 };
6547 
6548 class IU_State {
6549  public:
6550   Flag_Register _eflags;
6551   IU_Register   _rdi;
6552   IU_Register   _rsi;
6553   IU_Register   _rbp;
6554   IU_Register   _rsp;
6555   IU_Register   _rbx;
6556   IU_Register   _rdx;
6557   IU_Register   _rcx;
6558   IU_Register   _rax;
6559 
6560   void print() const {
6561     // computation registers
6562     printf("rax  = "); _rax.print(); printf("\n");
6563     printf("rbx  = "); _rbx.print(); printf("\n");
6564     printf("rcx  = "); _rcx.print(); printf("\n");
6565     printf("rdx  = "); _rdx.print(); printf("\n");
6566     printf("rdi  = "); _rdi.print(); printf("\n");
6567     printf("rsi  = "); _rsi.print(); printf("\n");
6568     printf("rbp  = "); _rbp.print(); printf("\n");
6569     printf("rsp  = "); _rsp.print(); printf("\n");
6570     printf("\n");
6571     // control registers
6572     printf("flgs = "); _eflags.print(); printf("\n");
6573   }
6574 };
6575 
6576 
6577 class CPU_State {
6578  public:
6579   FPU_State _fpu_state;
6580   IU_State  _iu_state;
6581 
6582   void print() const {
6583     printf("--------------------------------------------------\n");
6584     _iu_state .print();
6585     printf("\n");
6586     _fpu_state.print();
6587     printf("--------------------------------------------------\n");
6588   }
6589 
6590 };
6591 
6592 
6593 static void _print_CPU_state(CPU_State* state) {
6594   state->print();
6595 }
6596 
6597 
6598 void MacroAssembler::print_CPU_state() {
6599   push_CPU_state();
6600   push(rsp);                // pass CPU state
6601   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
6602   addptr(rsp, wordSize);       // discard argument
6603   pop_CPU_state();
6604 }
6605 
6606 
6607 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
6608   static int counter = 0;
6609   FPU_State* fs = &state->_fpu_state;
6610   counter++;
6611   // For leaf calls, only verify that the top few elements remain empty.
6612   // We only need 1 empty at the top for C2 code.
6613   if (stack_depth < 0) {
6614     if (fs->tag_for_st(7) != 3) {
6615       printf("FPR7 not empty\n");
6616       state->print();
6617       assert(false, "error");
6618       return false;
6619     }
6620     return true;                // All other stack states do not matter
6621   }
6622 
6623   assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
6624          "bad FPU control word");
6625 
6626   // compute stack depth
6627   int i = 0;
6628   while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
6629   int d = i;
6630   while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
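       // Here d is the number of contiguous non-empty entries starting at ST0;
       // the stack is well-formed only if all remaining entries are empty, i.e.
       // the second scan reached the last register (verified below).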
6631   // verify findings
6632   if (i != FPU_State::number_of_registers) {
6633     // stack not contiguous
6634     printf("%s: stack not contiguous at ST%d\n", s, i);
6635     state->print();
6636     assert(false, "error");
6637     return false;
6638   }
6639   // check if computed stack depth corresponds to expected stack depth
6640   if (stack_depth < 0) {
6641     // expected stack depth is -stack_depth or less
6642     if (d > -stack_depth) {
6643       // too many elements on the stack
6644       printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
6645       state->print();
6646       assert(false, "error");
6647       return false;
6648     }
6649   } else {
6650     // expected stack depth is stack_depth
6651     if (d != stack_depth) {
6652       // wrong stack depth
6653       printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
6654       state->print();
6655       assert(false, "error");
6656       return false;
6657     }
6658   }
6659   // everything is cool
6660   return true;
6661 }
6662 
6663 
6664 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
6665   if (!VerifyFPU) return;
6666   push_CPU_state();
6667   push(rsp);                // pass CPU state
6668   ExternalAddress msg((address) s);
6669   // pass message string s
6670   pushptr(msg.addr());
6671   push(stack_depth);        // pass stack depth
6672   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
6673   addptr(rsp, 3 * wordSize);   // discard arguments
6674   // check for error
6675   { Label L;
6676     testl(rax, rax);
6677     jcc(Assembler::notZero, L);
6678     int3();                  // break if error condition
6679     bind(L);
6680   }
6681   pop_CPU_state();
6682 }
6683 
6684 void MacroAssembler::restore_cpu_control_state_after_jni() {
6685   // Either restore the MXCSR register after returning from the JNI Call
6686   // or verify that it wasn't changed (with -Xcheck:jni flag).
6687   if (VM_Version::supports_sse()) {
6688     if (RestoreMXCSROnJNICalls) {
6689       ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
6690     } else if (CheckJNICalls) {
6691       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
6692     }
6693   }
6694   if (VM_Version::supports_avx()) {
6695     // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
6696     vzeroupper();
6697   }
6698 
6699 #ifndef _LP64
6700   // Either restore the x87 floating point control word after returning
6701   // from the JNI call or verify that it wasn't changed.
6702   if (CheckJNICalls) {
6703     call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
6704   }
6705 #endif // _LP64
6706 }
6707 
6708 
6709 void MacroAssembler::load_klass(Register dst, Register src) {
6710 #ifdef _LP64
6711   if (UseCompressedClassPointers) {
6712     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6713     decode_klass_not_null(dst);
6714   } else
6715 #endif
6716     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6717 }
6718 
6719 void MacroAssembler::load_prototype_header(Register dst, Register src) {
6720   load_klass(dst, src);
6721   movptr(dst, Address(dst, Klass::prototype_header_offset()));
6722 }
6723 
6724 void MacroAssembler::store_klass(Register dst, Register src) {
6725 #ifdef _LP64
6726   if (UseCompressedClassPointers) {
6727     encode_klass_not_null(src);
6728     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6729   } else
6730 #endif
6731     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6732 }
6733 
6734 void MacroAssembler::load_heap_oop(Register dst, Address src) {
6735 #ifdef _LP64
6736   // FIXME: Must change all places where we try to load the klass.
6737   if (UseCompressedOops) {
6738     movl(dst, src);
6739     decode_heap_oop(dst);
6740   } else
6741 #endif
6742     movptr(dst, src);
6743 }
6744 
6745 // Does not do verification; generates fixed-size code
6746 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
6747 #ifdef _LP64
6748   if (UseCompressedOops) {
6749     movl(dst, src);
6750     decode_heap_oop_not_null(dst);
6751   } else
6752 #endif
6753     movptr(dst, src);
6754 }
6755 
6756 void MacroAssembler::store_heap_oop(Address dst, Register src) {
6757 #ifdef _LP64
6758   if (UseCompressedOops) {
6759     assert(!dst.uses(src), "not enough registers");
6760     encode_heap_oop(src);
6761     movl(dst, src);
6762   } else
6763 #endif
6764     movptr(dst, src);
6765 }
6766 
6767 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
6768   assert_different_registers(src1, tmp);
6769 #ifdef _LP64
6770   if (UseCompressedOops) {
6771     bool did_push = false;
6772     if (tmp == noreg) {
6773       tmp = rax;
6774       push(tmp);
6775       did_push = true;
6776       assert(!src2.uses(rsp), "can't push");
6777     }
6778     load_heap_oop(tmp, src2);
6779     cmpptr(src1, tmp);
6780     if (did_push)  pop(tmp);
6781   } else
6782 #endif
6783     cmpptr(src1, src2);
6784 }
6785 
6786 // Used for storing NULLs.
6787 void MacroAssembler::store_heap_oop_null(Address dst) {
6788 #ifdef _LP64
6789   if (UseCompressedOops) {
6790     movl(dst, (int32_t)NULL_WORD);
6791   } else {
6792     movslq(dst, (int32_t)NULL_WORD);
6793   }
6794 #else
6795   movl(dst, (int32_t)NULL_WORD);
6796 #endif
6797 }
6798 
6799 #ifdef _LP64
6800 void MacroAssembler::store_klass_gap(Register dst, Register src) {
6801   if (UseCompressedClassPointers) {
6802     // Store to klass gap in destination
6803     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
6804   }
6805 }
6806 
6807 #ifdef ASSERT
6808 void MacroAssembler::verify_heapbase(const char* msg) {
6809   assert (UseCompressedOops, "should be compressed");
6810   assert (Universe::heap() != NULL, "java heap should be initialized");
6811   if (CheckCompressedOops) {
6812     Label ok;
6813     push(rscratch1); // cmpptr trashes rscratch1
6814     cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
6815     jcc(Assembler::equal, ok);
6816     STOP(msg);
6817     bind(ok);
6818     pop(rscratch1);
6819   }
6820 }
6821 #endif
6822 
6823 // Algorithm must match oop.inline.hpp encode_heap_oop.
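     // Informally (a sketch of the encoding below):
     //   narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> narrow_oop_shift
     // with the NULL case handled by cmov-ing the heap base over a NULL oop
     // before the subtract.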
6824 void MacroAssembler::encode_heap_oop(Register r) {
6825 #ifdef ASSERT
6826   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
6827 #endif
6828   verify_oop(r, "broken oop in encode_heap_oop");
6829   if (Universe::narrow_oop_base() == NULL) {
6830     if (Universe::narrow_oop_shift() != 0) {
6831       assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6832       shrq(r, LogMinObjAlignmentInBytes);
6833     }
6834     return;
6835   }
6836   testq(r, r);
6837   cmovq(Assembler::equal, r, r12_heapbase);
6838   subq(r, r12_heapbase);
6839   shrq(r, LogMinObjAlignmentInBytes);
6840 }
6841 
6842 void MacroAssembler::encode_heap_oop_not_null(Register r) {
6843 #ifdef ASSERT
6844   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
6845   if (CheckCompressedOops) {
6846     Label ok;
6847     testq(r, r);
6848     jcc(Assembler::notEqual, ok);
6849     STOP("null oop passed to encode_heap_oop_not_null");
6850     bind(ok);
6851   }
6852 #endif
6853   verify_oop(r, "broken oop in encode_heap_oop_not_null");
6854   if (Universe::narrow_oop_base() != NULL) {
6855     subq(r, r12_heapbase);
6856   }
6857   if (Universe::narrow_oop_shift() != 0) {
6858     assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6859     shrq(r, LogMinObjAlignmentInBytes);
6860   }
6861 }
6862 
6863 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
6864 #ifdef ASSERT
6865   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
6866   if (CheckCompressedOops) {
6867     Label ok;
6868     testq(src, src);
6869     jcc(Assembler::notEqual, ok);
6870     STOP("null oop passed to encode_heap_oop_not_null2");
6871     bind(ok);
6872   }
6873 #endif
6874   verify_oop(src, "broken oop in encode_heap_oop_not_null2");
6875   if (dst != src) {
6876     movq(dst, src);
6877   }
6878   if (Universe::narrow_oop_base() != NULL) {
6879     subq(dst, r12_heapbase);
6880   }
6881   if (Universe::narrow_oop_shift() != 0) {
6882     assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6883     shrq(dst, LogMinObjAlignmentInBytes);
6884   }
6885 }
6886 
6887 void  MacroAssembler::decode_heap_oop(Register r) {
6888 #ifdef ASSERT
6889   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
6890 #endif
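       // Informal inverse of encode_heap_oop:
       //   oop = (narrow == 0) ? NULL : narrow_oop_base + ((uintptr_t)narrow << shift)
       // In the non-NULL-base case, the shlq sets ZF for a zero input so the
       // jccb can skip the base add and NULL stays NULL.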
6891   if (Universe::narrow_oop_base() == NULL) {
6892     if (Universe::narrow_oop_shift() != 0) {
6893       assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6894       shlq(r, LogMinObjAlignmentInBytes);
6895     }
6896   } else {
6897     Label done;
6898     shlq(r, LogMinObjAlignmentInBytes);
6899     jccb(Assembler::equal, done);
6900     addq(r, r12_heapbase);
6901     bind(done);
6902   }
6903   verify_oop(r, "broken oop in decode_heap_oop");
6904 }
6905 
6906 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
6907   // Note: it will change flags
6908   assert (UseCompressedOops, "should only be used for compressed headers");
6909   assert (Universe::heap() != NULL, "java heap should be initialized");
6910   // Cannot assert, unverified entry point counts instructions (see .ad file)
6911   // vtableStubs also counts instructions in pd_code_size_limit.
6912   // Also do not verify_oop as this is called by verify_oop.
6913   if (Universe::narrow_oop_shift() != 0) {
6914     assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6915     shlq(r, LogMinObjAlignmentInBytes);
6916     if (Universe::narrow_oop_base() != NULL) {
6917       addq(r, r12_heapbase);
6918     }
6919   } else {
6920     assert (Universe::narrow_oop_base() == NULL, "sanity");
6921   }
6922 }
6923 
6924 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
6925   // Note: it will change flags
6926   assert (UseCompressedOops, "should only be used for compressed headers");
6927   assert (Universe::heap() != NULL, "java heap should be initialized");
6928   // Cannot assert, unverified entry point counts instructions (see .ad file)
6929   // vtableStubs also counts instructions in pd_code_size_limit.
6930   // Also do not verify_oop as this is called by verify_oop.
6931   if (Universe::narrow_oop_shift() != 0) {
6932     assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
6933     if (LogMinObjAlignmentInBytes == Address::times_8) {
6934       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
6935     } else {
6936       if (dst != src) {
6937         movq(dst, src);
6938       }
6939       shlq(dst, LogMinObjAlignmentInBytes);
6940       if (Universe::narrow_oop_base() != NULL) {
6941         addq(dst, r12_heapbase);
6942       }
6943     }
6944   } else {
6945     assert (Universe::narrow_oop_base() == NULL, "sanity");
6946     if (dst != src) {
6947       movq(dst, src);
6948     }
6949   }
6950 }
6951 
6952 void MacroAssembler::encode_klass_not_null(Register r) {
6953   if (Universe::narrow_klass_base() != NULL) {
6954     // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
6955     assert(r != r12_heapbase, "Encoding a klass in r12");
6956     mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
6957     subq(r, r12_heapbase);
6958   }
6959   if (Universe::narrow_klass_shift() != 0) {
6960     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6961     shrq(r, LogKlassAlignmentInBytes);
6962   }
6963   if (Universe::narrow_klass_base() != NULL) {
6964     reinit_heapbase();
6965   }
6966 }
6967 
6968 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
6969   if (dst == src) {
6970     encode_klass_not_null(src);
6971   } else {
6972     if (Universe::narrow_klass_base() != NULL) {
6973       mov64(dst, (int64_t)Universe::narrow_klass_base());
6974       negq(dst);
6975       addq(dst, src);
6976     } else {
6977       movptr(dst, src);
6978     }
6979     if (Universe::narrow_klass_shift() != 0) {
6980       assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6981       shrq(dst, LogKlassAlignmentInBytes);
6982     }
6983   }
6984 }
6985 
6986 // Function instr_size_for_decode_klass_not_null() counts the instructions
6987 // generated by decode_klass_not_null(register r) and reinit_heapbase(),
6988 // when (Universe::heap() != NULL).  Hence, if the instructions they
6989 // generate change, then this method needs to be updated.
6990 int MacroAssembler::instr_size_for_decode_klass_not_null() {
6991   assert (UseCompressedClassPointers, "only for compressed klass ptrs");
6992   if (Universe::narrow_klass_base() != NULL) {
6993     // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
6994     return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
6995   } else {
6996     // longest case: the decode_klass_not_null(dst, src) form emits mov64 + leaq
6997     return 16;
6998   }
6999 }
7000 
7001 // !!! If the instructions that get generated here change then function
7002 // instr_size_for_decode_klass_not_null() needs to get updated.
7003 void  MacroAssembler::decode_klass_not_null(Register r) {
7004   // Note: it will change flags
7005   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7006   assert(r != r12_heapbase, "Decoding a klass in r12");
7007   // Cannot assert, unverified entry point counts instructions (see .ad file)
7008   // vtableStubs also counts instructions in pd_code_size_limit.
7009   // Also do not verify_oop as this is called by verify_oop.
7010   if (Universe::narrow_klass_shift() != 0) {
7011     assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
7012     shlq(r, LogKlassAlignmentInBytes);
7013   }
7014   // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
7015   if (Universe::narrow_klass_base() != NULL) {
7016     mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
7017     addq(r, r12_heapbase);
7018     reinit_heapbase();
7019   }
7020 }
7021 
7022 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
7023   // Note: it will change flags
7024   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7025   if (dst == src) {
7026     decode_klass_not_null(dst);
7027   } else {
7028     // Cannot assert, unverified entry point counts instructions (see .ad file)
7029     // vtableStubs also counts instructions in pd_code_size_limit.
7030     // Also do not verify_oop as this is called by verify_oop.
7031     mov64(dst, (int64_t)Universe::narrow_klass_base());
7032     if (Universe::narrow_klass_shift() != 0) {
7033       assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
7034       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
7035       leaq(dst, Address(dst, src, Address::times_8, 0));
7036     } else {
7037       addq(dst, src);
7038     }
7039   }
7040 }
7041 
7042 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
7043   assert (UseCompressedOops, "should only be used for compressed headers");
7044   assert (Universe::heap() != NULL, "java heap should be initialized");
7045   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7046   int oop_index = oop_recorder()->find_index(obj);
7047   RelocationHolder rspec = oop_Relocation::spec(oop_index);
7048   mov_narrow_oop(dst, oop_index, rspec);
7049 }
7050 
7051 void  MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
7052   assert (UseCompressedOops, "should only be used for compressed headers");
7053   assert (Universe::heap() != NULL, "java heap should be initialized");
7054   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7055   int oop_index = oop_recorder()->find_index(obj);
7056   RelocationHolder rspec = oop_Relocation::spec(oop_index);
7057   mov_narrow_oop(dst, oop_index, rspec);
7058 }
7059 
7060 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
7061   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7062   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7063   int klass_index = oop_recorder()->find_index(k);
7064   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
7065   mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
7066 }
7067 
7068 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
7069   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7070   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7071   int klass_index = oop_recorder()->find_index(k);
7072   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
7073   mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
7074 }
7075 
7076 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
7077   assert (UseCompressedOops, "should only be used for compressed headers");
7078   assert (Universe::heap() != NULL, "java heap should be initialized");
7079   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7080   int oop_index = oop_recorder()->find_index(obj);
7081   RelocationHolder rspec = oop_Relocation::spec(oop_index);
7082   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
7083 }
7084 
7085 void  MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
7086   assert (UseCompressedOops, "should only be used for compressed headers");
7087   assert (Universe::heap() != NULL, "java heap should be initialized");
7088   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7089   int oop_index = oop_recorder()->find_index(obj);
7090   RelocationHolder rspec = oop_Relocation::spec(oop_index);
7091   Assembler::cmp_narrow_oop(dst, oop_index, rspec);
7092 }
7093 
7094 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
7095   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7096   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7097   int klass_index = oop_recorder()->find_index(k);
7098   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
7099   Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
7100 }
7101 
7102 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
7103   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7104   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7105   int klass_index = oop_recorder()->find_index(k);
7106   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
7107   Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
7108 }
7109 
7110 void MacroAssembler::reinit_heapbase() {
7111   if (UseCompressedOops || UseCompressedClassPointers) {
7112     if (Universe::heap() != NULL) {
7113       if (Universe::narrow_oop_base() == NULL) {
7114         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
7115       } else {
7116         mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
7117       }
7118     } else {
7119       movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
7120     }
7121   }
7122 }
7123 
7124 #endif // _LP64
7125 
7126 
7127 // C2 compiled method's prolog code.
7128 void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
7129 
7130   // WARNING: Initial instruction MUST be 5 bytes or longer so that
7131   // NativeJump::patch_verified_entry will be able to patch out the entry
7132   // code safely. The push to verify stack depth is ok at 5 bytes,
7133   // the frame allocation can be either 3 or 6 bytes. So if we don't do
7134   // a stack bang then we must use the 6-byte frame allocation even if
7135   // we have no frame. :-(
7136   assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
7137 
7138   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
7139   // Remove word for return addr
7140   framesize -= wordSize;
7141   stack_bang_size -= wordSize;
7142 
7143   // Calls to C2R adapters often do not accept exceptional returns.
7144   // We require that their callers must bang for them.  But be careful, because
7145   // some VM calls (such as call site linkage) can use several kilobytes of
7146   // stack.  But the stack safety zone should account for that.
7147   // See bugs 4446381, 4468289, 4497237.
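       // Sketch of the resulting frame in either case (stack grows downward):
       //   [return address]                 (pushed by the caller)
       //   [saved rbp]
       //   [framesize - 2*wordSize bytes]   <- rsp after the prolog
       // The bang path below builds it with push + subptr; the no-bang path
       // uses a single subptr plus an explicit store of rbp.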
7148   if (stack_bang_size > 0) {
7149     generate_stack_overflow_check(stack_bang_size);
7150 
7151     // We always push rbp so that on return to the interpreter rbp will be
7152     // restored correctly and we can correct the stack.
7153     push(rbp);
7154     // Save caller's stack pointer into RBP if the frame pointer is preserved.
7155     if (PreserveFramePointer) {
7156       mov(rbp, rsp);
7157     }
7158     // Remove word for ebp
7159     framesize -= wordSize;
7160 
7161     // Create frame
7162     if (framesize) {
7163       subptr(rsp, framesize);
7164     }
7165   } else {
7166     // Create frame (force generation of a 4 byte immediate value)
7167     subptr_imm32(rsp, framesize);
7168 
7169     // Save RBP register now.
7170     framesize -= wordSize;
7171     movptr(Address(rsp, framesize), rbp);
7172     // Save caller's stack pointer into RBP if the frame pointer is preserved.
7173     if (PreserveFramePointer) {
7174       movptr(rbp, rsp);
7175       if (framesize > 0) {
7176         addptr(rbp, framesize);
7177       }
7178     }
7179   }
7180 
7181   if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
7182     framesize -= wordSize;
7183     movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
7184   }
7185 
7186 #ifndef _LP64
7187   // If method sets FPU control word do it now
7188   if (fp_mode_24b) {
7189     fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
7190   }
7191   if (UseSSE >= 2 && VerifyFPU) {
7192     verify_FPU(0, "FPU stack must be clean on entry");
7193   }
7194 #endif
7195 
7196 #ifdef ASSERT
7197   if (VerifyStackAtCalls) {
7198     Label L;
7199     push(rax);
7200     mov(rax, rsp);
7201     andptr(rax, StackAlignmentInBytes-1);
7202     cmpptr(rax, StackAlignmentInBytes-wordSize);
7203     pop(rax);
7204     jcc(Assembler::equal, L);
7205     STOP("Stack is not properly aligned!");
7206     bind(L);
7207   }
7208 #endif
7209 
7210 }
7211 
7212 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
7213   // cnt - number of qwords (8-byte words).
7214   // base - start address, qword aligned.
7215   assert(base==rdi, "base register must be edi for rep stos");
7216   assert(tmp==rax,  "tmp register must be eax for rep stos");
7217   assert(cnt==rcx,  "cnt register must be ecx for rep stos");
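  // Sketch of the generated sequence (assuming a 64-bit VM): rax is zeroed,
  // then either "rep stosb" stores AL cnt*8 times (cnt pre-converted to a
  // byte count), or "rep stosq" stores RAX cnt times (one qword per step).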
7218 
7219   xorptr(tmp, tmp);
7220   if (UseFastStosb) {
7221     shlptr(cnt,3); // convert to number of bytes
7222     rep_stosb();
7223   } else {
7224     NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
7225     rep_stos();
7226   }
7227 }
7228 
7229 #ifdef COMPILER2
7230 
7231 // IndexOf for constant substrings with size >= 8 chars
7232 // which don't need to be loaded through the stack.
7233 void MacroAssembler::string_indexofC8(Register str1, Register str2,
7234                                       Register cnt1, Register cnt2,
7235                                       int int_cnt2,  Register result,
7236                                       XMMRegister vec, Register tmp,
7237                                       int ae) {
7238   ShortBranchVerifier sbv(this);
7239   assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
7240   assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
7241   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
7242 
7243   // This method uses the pcmpestri instruction with bound registers
7244   //   inputs:
7245   //     xmm - substring
7246   //     rax - substring length (elements count)
7247   //     mem - scanned string
7248   //     rdx - string length (elements count)
7249   //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
7250   //     0xc - mode: 1100 (substring search) + 00 (unsigned bytes)
7251   //   outputs:
7252   //     rcx - matched index in string
7253   assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
7254   int mode   = (ae == StrIntrinsicNode::LL) ? 0x0c : 0x0d; // bytes or shorts
7255   int stride = (ae == StrIntrinsicNode::LL) ? 16 : 8; //UU, UL -> 8
7256   Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
7257   Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;
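  // Decoding the imm8 modes above: 0x0d = 0b1101 selects unsigned words
  // (bits[1:0] = 01) with the "equal ordered" aggregation (bits[3:2] = 11);
  // 0x0c selects unsigned bytes with the same aggregation. After pcmpestri,
  // CF == 1 signals a substring candidate and OF == 1 a match at element 0.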
7258 
7259   Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
7260         RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
7261         MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
7262 
7263   // Note, inline_string_indexOf() generates checks:
7264   // if (substr.count > string.count) return -1;
7265   // if (substr.count == 0) return 0;
7266   assert(int_cnt2 >= stride, "this code is used only for cnt2 >= 8 chars");
7267 
7268   // Load substring.
7269   if (ae == StrIntrinsicNode::UL) {
7270     pmovzxbw(vec, Address(str2, 0));
7271   } else {
7272     movdqu(vec, Address(str2, 0));
7273   }
7274   movl(cnt2, int_cnt2);
7275   movptr(result, str1); // string addr
7276 
7277   if (int_cnt2 > stride) {
7278     jmpb(SCAN_TO_SUBSTR);
7279 
7280     // Reload substr for rescan; this code
7281     // is executed only for large substrings (> 8 chars)
7282     bind(RELOAD_SUBSTR);
7283     if (ae == StrIntrinsicNode::UL) {
7284       pmovzxbw(vec, Address(str2, 0));
7285     } else {
7286       movdqu(vec, Address(str2, 0));
7287     }
7288     negptr(cnt2); // Jumped here with negative cnt2, convert to positive
7289 
7290     bind(RELOAD_STR);
7291     // We came here after the beginning of the substring was
7292     // matched but the rest of it was not, so we need to search
7293     // again. Start from the next element after the previous match.
7294 
7295     // cnt2 is number of substring remaining elements and
7296     // cnt1 is number of string remaining elements when cmp failed.
7297     // Restored cnt1 = cnt1 - cnt2 + int_cnt2
7298     subl(cnt1, cnt2);
7299     addl(cnt1, int_cnt2);
7300     movl(cnt2, int_cnt2); // Now restore cnt2
7301 
7302     decrementl(cnt1);     // Shift to next element
7303     cmpl(cnt1, cnt2);
7304     jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
7305 
7306     addptr(result, (1<<scale1));
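    // Illustrative numbers: with int_cnt2 == 10, cnt2 == 6 substring elements
    // and cnt1 == 20 string elements left when the compare failed, the
    // restored cnt1 is 20 - 6 + 10 == 24; after the decrement 23 elements
    // remain, still >= cnt2 (10), so scanning resumes one element further on.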
7307 
7308   } // (int_cnt2 > 8)
7309 
7310   // Scan string for start of substr in 16-byte vectors
7311   bind(SCAN_TO_SUBSTR);
7312   pcmpestri(vec, Address(result, 0), mode);
7313   jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
7314   subl(cnt1, stride);
7315   jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
7316   cmpl(cnt1, cnt2);
7317   jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
7318   addptr(result, 16);
7319   jmpb(SCAN_TO_SUBSTR);
7320 
7321   // Found a potential substr
7322   bind(FOUND_CANDIDATE);
7323   // Matched whole vector if first element matched (tmp(rcx) == 0).
7324   if (int_cnt2 == stride) {
7325     jccb(Assembler::overflow, RET_FOUND);    // OF == 1
7326   } else { // int_cnt2 > 8
7327     jccb(Assembler::overflow, FOUND_SUBSTR);
7328   }
7329   // After pcmpestri tmp(rcx) contains matched element index
7330   // Compute start addr of substr
7331   lea(result, Address(result, tmp, scale1));
7332 
7333   // Make sure string is still long enough
7334   subl(cnt1, tmp);
7335   cmpl(cnt1, cnt2);
7336   if (int_cnt2 == stride) {
7337     jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
7338   } else { // int_cnt2 > 8
7339     jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
7340   }
7341   // Left less than substring.
7342 
7343   bind(RET_NOT_FOUND);
7344   movl(result, -1);
7345   jmpb(EXIT);
7346 
7347   if (int_cnt2 > stride) {
7348     // This code is optimized for the case when whole substring
7349     // is matched if its head is matched.
7350     bind(MATCH_SUBSTR_HEAD);
7351     pcmpestri(vec, Address(result, 0), mode);
7352     // Reload only the string if it does not match
7353     jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
7354 
7355     Label CONT_SCAN_SUBSTR;
7356     // Compare the rest of substring (> 8 chars).
7357     bind(FOUND_SUBSTR);
7358     // First 8 chars are already matched.
7359     negptr(cnt2);
7360     addptr(cnt2, stride);
7361 
7362     bind(SCAN_SUBSTR);
7363     subl(cnt1, stride);
7364     cmpl(cnt2, -stride); // Do not read beyond substring
7365     jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
7366     // Back-up strings to avoid reading beyond substring:
7367     // cnt1 = cnt1 - cnt2 + 8
7368     addl(cnt1, cnt2); // cnt2 is negative
7369     addl(cnt1, stride);
7370     movl(cnt2, stride); negptr(cnt2);
7371     bind(CONT_SCAN_SUBSTR);
7372     if (int_cnt2 < (int)G) {
7373       int tail_off1 = int_cnt2<<scale1;
7374       int tail_off2 = int_cnt2<<scale2;
7375       if (ae == StrIntrinsicNode::UL) {
7376         pmovzxbw(vec, Address(str2, cnt2, scale2, tail_off2));
7377       } else {
7378         movdqu(vec, Address(str2, cnt2, scale2, tail_off2));
7379       }
7380       pcmpestri(vec, Address(result, cnt2, scale1, tail_off1), mode);
7381     } else {
7382       // calculate index in register to avoid integer overflow (int_cnt2*2)
7383       movl(tmp, int_cnt2);
7384       addptr(tmp, cnt2);
7385       if (ae == StrIntrinsicNode::UL) {
7386         pmovzxbw(vec, Address(str2, tmp, scale2, 0));
7387       } else {
7388         movdqu(vec, Address(str2, tmp, scale2, 0));
7389       }
7390       pcmpestri(vec, Address(result, tmp, scale1, 0), mode);
7391     }
7392     // Need to reload string pointers if we did not match the whole vector
7393     jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
7394     addptr(cnt2, stride);
7395     jcc(Assembler::negative, SCAN_SUBSTR);
7396     // Fall through if found full substring
7397 
7398   } // (int_cnt2 > 8)
7399 
7400   bind(RET_FOUND);
7401   // Found result if we matched full small substring.
7402   // Compute substr offset
7403   subptr(result, str1);
7404   if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
7405     shrl(result, 1); // index
7406   }
7407   bind(EXIT);
7408 
7409 } // string_indexofC8
7410 
7411 // Small strings are loaded through the stack if they cross a page boundary.
7412 void MacroAssembler::string_indexof(Register str1, Register str2,
7413                                     Register cnt1, Register cnt2,
7414                                     int int_cnt2,  Register result,
7415                                     XMMRegister vec, Register tmp,
7416                                     int ae) {
7417   ShortBranchVerifier sbv(this);
7418   assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
7419   assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
7420   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
7421 
7422   //
7423   // int_cnt2 is the length of a small (< 8 chars) constant substring,
7424   // or -1 for a non-constant substring, in which case its length
7425   // is in the cnt2 register.
7426   //
7427   // Note, inline_string_indexOf() generates checks:
7428   // if (substr.count > string.count) return -1;
7429   // if (substr.count == 0) return 0;
7430   //
7431   int stride = (ae == StrIntrinsicNode::LL) ? 16 : 8; //UU, UL -> 8
7432   assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < stride), "should be != 0");
7433   // This method uses the pcmpestri instruction with bound registers
7434   //   inputs:
7435   //     xmm - substring
7436   //     rax - substring length (elements count)
7437   //     mem - scanned string
7438   //     rdx - string length (elements count)
7439   //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
7440   //     0xc - mode: 1100 (substring search) + 00 (unsigned bytes)
7441   //   outputs:
7442   //     rcx - matched index in string
7443   assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
7444   int mode = (ae == StrIntrinsicNode::LL) ? 0x0c : 0x0d; // bytes or shorts
7445   Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
7446   Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;
7447 
7448   Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
7449         RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
7450         FOUND_CANDIDATE;
7451 
7452   { //========================================================
7453     // We don't know where these strings are located
7454     // and we can't read beyond them. Load them through the stack.
7455     Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
7456 
7457     movptr(tmp, rsp); // save old SP
7458 
7459     if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
7460       if (int_cnt2 == (1>>scale2)) { // One byte
7461         assert((ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL), "Only possible for latin1 encoding");
7462         load_unsigned_byte(result, Address(str2, 0));
7463         movdl(vec, result); // move 32 bits
7464       } else if (ae == StrIntrinsicNode::LL && int_cnt2 == 3) {  // Three bytes
7465         // Not enough header space in 32-bit VM: 12+3 = 15.
7466         movl(result, Address(str2, -1));
7467         shrl(result, 8);
7468         movdl(vec, result); // move 32 bits
7469       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (2>>scale2)) {  // One char
7470         load_unsigned_short(result, Address(str2, 0));
7471         movdl(vec, result); // move 32 bits
7472       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (4>>scale2)) { // Two chars
7473         movdl(vec, Address(str2, 0)); // move 32 bits
7474       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (8>>scale2)) { // Four chars
7475         movq(vec, Address(str2, 0));  // move 64 bits
7476       } else { // cnt2 = { 3, 5, 6, 7 } || (ae == StrIntrinsicNode::UL && cnt2 = {2, ..., 7})
7477         // Array header size is 12 bytes in 32-bit VM
7478         // + 6 bytes for 3 chars == 18 bytes,
7479         // enough space to load vec and shift.
7480         assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
7481         if (ae == StrIntrinsicNode::UL) {
7482           int tail_off = int_cnt2-8;
7483           pmovzxbw(vec, Address(str2, tail_off));
7484           psrldq(vec, -2*tail_off);
7485         } else {
7487           int tail_off = int_cnt2*(1<<scale2);
7488           movdqu(vec, Address(str2, tail_off-16));
7489           psrldq(vec, 16-tail_off);
7490         }
7491       }
7492     } else { // not constant substring
7493       cmpl(cnt2, stride);
7494       jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
7495 
7496       // We can read beyond the string if str+16 does not cross a page boundary
7497       // since heaps are aligned and mapped by pages.
7498       assert(os::vm_page_size() < (int)G, "default page should be small");
7499       movl(result, str2); // We need only low 32 bits
7500       andl(result, (os::vm_page_size()-1));
7501       cmpl(result, (os::vm_page_size()-16));
7502       jccb(Assembler::belowEqual, CHECK_STR);
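      // Example (assuming 4K pages): if the low bits of str2 are 0xff8,
      // then 0xff8 > 0xff0 (4096 - 16), so a 16-byte load would cross into
      // the next page and the substring is copied to the stack below instead.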
7503 
7504       // Move small strings to stack to allow load 16 bytes into vec.
7505       subptr(rsp, 16);
7506       int stk_offset = wordSize-(1<<scale2);
7507       push(cnt2);
7508 
7509       bind(COPY_SUBSTR);
7510       if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL) {
7511         load_unsigned_byte(result, Address(str2, cnt2, scale2, -1));
7512         movb(Address(rsp, cnt2, scale2, stk_offset), result);
7513       } else if (ae == StrIntrinsicNode::UU) {
7514         load_unsigned_short(result, Address(str2, cnt2, scale2, -2));
7515         movw(Address(rsp, cnt2, scale2, stk_offset), result);
7516       }
7517       decrement(cnt2);
7518       jccb(Assembler::notZero, COPY_SUBSTR);
7519 
7520       pop(cnt2);
7521       movptr(str2, rsp);  // New substring address
7522     } // non constant
7523 
7524     bind(CHECK_STR);
7525     cmpl(cnt1, stride);
7526     jccb(Assembler::aboveEqual, BIG_STRINGS);
7527 
7528     // Check cross page boundary.
7529     movl(result, str1); // We need only low 32 bits
7530     andl(result, (os::vm_page_size()-1));
7531     cmpl(result, (os::vm_page_size()-16));
7532     jccb(Assembler::belowEqual, BIG_STRINGS);
7533 
7534     subptr(rsp, 16);
7535     int stk_offset = -(1<<scale1);
7536     if (int_cnt2 < 0) { // not constant
7537       push(cnt2);
7538       stk_offset += wordSize;
7539     }
7540     movl(cnt2, cnt1);
7541 
7542     bind(COPY_STR);
7543     if (ae == StrIntrinsicNode::LL) {
7544       load_unsigned_byte(result, Address(str1, cnt2, scale1, -1));
7545       movb(Address(rsp, cnt2, scale1, stk_offset), result);
7546     } else {
7547       load_unsigned_short(result, Address(str1, cnt2, scale1, -2));
7548       movw(Address(rsp, cnt2, scale1, stk_offset), result);
7549     }
7550     decrement(cnt2);
7551     jccb(Assembler::notZero, COPY_STR);
7552 
7553     if (int_cnt2 < 0) { // not constant
7554       pop(cnt2);
7555     }
7556     movptr(str1, rsp);  // New string address
7557 
7558     bind(BIG_STRINGS);
7559     // Load substring.
7560     if (int_cnt2 < 0) { // -1
7561       if (ae == StrIntrinsicNode::UL) {
7562         pmovzxbw(vec, Address(str2, 0));
7563       } else {
7564         movdqu(vec, Address(str2, 0));
7565       }
7566       push(cnt2);       // substr count
7567       push(str2);       // substr addr
7568       push(str1);       // string addr
7569     } else {
7570       // Small (< 8 chars) constant substrings are loaded already.
7571       movl(cnt2, int_cnt2);
7572     }
7573     push(tmp);  // original SP
7574 
7575   } // Finished loading
7576 
7577   //========================================================
7578   // Start search
7579   //
7580 
7581   movptr(result, str1); // string addr
7582 
7583   if (int_cnt2  < 0) {  // Only for non constant substring
7584     jmpb(SCAN_TO_SUBSTR);
7585 
7586     // SP saved at sp+0
7587     // String saved at sp+1*wordSize
7588     // Substr saved at sp+2*wordSize
7589     // Substr count saved at sp+3*wordSize
7590 
7591     // Reload substr for rescan; this code
7592     // is executed only for large substrings (> 8 chars)
7593     bind(RELOAD_SUBSTR);
7594     movptr(str2, Address(rsp, 2*wordSize));
7595     movl(cnt2, Address(rsp, 3*wordSize));
7596     if (ae == StrIntrinsicNode::UL) {
7597       pmovzxbw(vec, Address(str2, 0));
7598     } else {
7599       movdqu(vec, Address(str2, 0));
7600     }
7601     // We came here after the beginning of the substring was
7602     // matched but the rest of it was not, so we need to search
7603     // again. Start from the next element after the previous match.
7604     subptr(str1, result); // Restore counter
7605     if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
7606       shrl(str1, 1);
7607     }
7608     addl(cnt1, str1);
7609     decrementl(cnt1);   // Shift to next element
7610     cmpl(cnt1, cnt2);
7611     jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
7612 
7613     addptr(result, (1<<scale1));
7614   } // non constant
7615 
7616   // Scan string for start of substr in 16-byte vectors
7617   bind(SCAN_TO_SUBSTR);
7618   assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
7619   pcmpestri(vec, Address(result, 0), mode);
7620   jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
7621   subl(cnt1, stride);
7622   jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
7623   cmpl(cnt1, cnt2);
7624   jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
7625   addptr(result, 16);
7626 
7627   bind(ADJUST_STR);
7628   cmpl(cnt1, stride); // Do not read beyond string
7629   jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
7630   // Back-up string to avoid reading beyond string.
7631   lea(result, Address(result, cnt1, scale1, -16));
7632   movl(cnt1, stride);
7633   jmpb(SCAN_TO_SUBSTR);
7634 
7635   // Found a potential substr
7636   bind(FOUND_CANDIDATE);
7637   // After pcmpestri tmp(rcx) contains matched element index
7638 
7639   // Make sure string is still long enough
7640   subl(cnt1, tmp);
7641   cmpl(cnt1, cnt2);
7642   jccb(Assembler::greaterEqual, FOUND_SUBSTR);
7643   // Left less than substring.
7644 
7645   bind(RET_NOT_FOUND);
7646   movl(result, -1);
7647   jmpb(CLEANUP);
7648 
7649   bind(FOUND_SUBSTR);
7650   // Compute start addr of substr
7651   lea(result, Address(result, tmp, scale1));
7652   if (int_cnt2 > 0) { // Constant substring
7653     // Repeat search for small substring (< 8 chars)
7654     // from new point without reloading substring.
7655     // Have to check that we don't read beyond string.
7656     cmpl(tmp, stride-int_cnt2);
7657     jccb(Assembler::greater, ADJUST_STR);
7658     // Fall through if matched whole substring.
7659   } else { // non constant
7660     assert(int_cnt2 == -1, "should be != 0");
7661 
7662     addl(tmp, cnt2);
7663     // Found result if we matched whole substring.
7664     cmpl(tmp, stride);
7665     jccb(Assembler::lessEqual, RET_FOUND);
7666 
7667     // Repeat search for small substring (<= 8 chars)
7668     // from new point 'str1' without reloading substring.
7669     cmpl(cnt2, stride);
7670     // Have to check that we don't read beyond string.
7671     jccb(Assembler::lessEqual, ADJUST_STR);
7672 
7673     Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
7674     // Compare the rest of substring (> 8 chars).
7675     movptr(str1, result);
7676 
7677     cmpl(tmp, cnt2);
7678     // First 8 chars are already matched.
7679     jccb(Assembler::equal, CHECK_NEXT);
7680 
7681     bind(SCAN_SUBSTR);
7682     pcmpestri(vec, Address(str1, 0), mode);
7683     // Need to reload string pointers if we did not match the whole vector
7684     jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
7685 
7686     bind(CHECK_NEXT);
7687     subl(cnt2, stride);
7688     jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
7689     addptr(str1, 16);
7690     if (ae == StrIntrinsicNode::UL) {
7691       addptr(str2, 8);
7692     } else {
7693       addptr(str2, 16);
7694     }
7695     subl(cnt1, stride);
7696     cmpl(cnt2, stride); // Do not read beyond substring
7697     jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
7698     // Back-up strings to avoid reading beyond substring.
7699 
7700     if (ae == StrIntrinsicNode::UL) {
7701       lea(str2, Address(str2, cnt2, scale2, -8));
7702       lea(str1, Address(str1, cnt2, scale1, -16));
7703     } else {
7704       lea(str2, Address(str2, cnt2, scale2, -16));
7705       lea(str1, Address(str1, cnt2, scale1, -16));
7706     }
7707     subl(cnt1, cnt2);
7708     movl(cnt2, stride);
7709     addl(cnt1, stride);
7710     bind(CONT_SCAN_SUBSTR);
7711     if (ae == StrIntrinsicNode::UL) {
7712       pmovzxbw(vec, Address(str2, 0));
7713     } else {
7714       movdqu(vec, Address(str2, 0));
7715     }
7716     jmpb(SCAN_SUBSTR);
7717 
7718     bind(RET_FOUND_LONG);
7719     movptr(str1, Address(rsp, wordSize));
7720   } // non constant
7721 
7722   bind(RET_FOUND);
7723   // Compute substr offset
7724   subptr(result, str1);
7725   if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
7726     shrl(result, 1); // index
7727   }
7728   bind(CLEANUP);
7729   pop(rsp); // restore SP
7730 
7731 } // string_indexof
7732 
7733 void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
7734                                          XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp) {
7735   ShortBranchVerifier sbv(this);
7736   assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
7737   assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
7738 
7739   int stride = 8;
7740 
7741   Label FOUND_CHAR, SCAN_TO_CHAR, SCAN_TO_CHAR_LOOP,
7742         SCAN_TO_8_CHAR, SCAN_TO_8_CHAR_LOOP, SCAN_TO_16_CHAR_LOOP,
7743         RET_NOT_FOUND, SCAN_TO_8_CHAR_INIT,
7744         FOUND_SEQ_CHAR, DONE_LABEL;
7745 
7746   movptr(result, str1);
7747   if (UseAVX >= 2) {
7748     cmpl(cnt1, stride);
7749     jccb(Assembler::less, SCAN_TO_CHAR_LOOP);
7750     cmpl(cnt1, 2*stride);
7751     jccb(Assembler::less, SCAN_TO_8_CHAR_INIT);
7752     movdl(vec1, ch);
7753     vpbroadcastw(vec1, vec1);
7754     vpxor(vec2, vec2);
7755     movl(tmp, cnt1);
7756     andl(tmp, 0xFFFFFFF0);  //vector count (in chars)
7757     andl(cnt1,0x0000000F);  //tail count (in chars)
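    // Example: cnt1 == 37 chars gives tmp == 32 chars for the 16-char
    // vector loop below and cnt1 == 5 chars for the scalar tail.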
7758 
7759     bind(SCAN_TO_16_CHAR_LOOP);
7760     vmovdqu(vec3, Address(result, 0));
7761     vpcmpeqw(vec3, vec3, vec1, 1);
7762     vptest(vec2, vec3);
7763     jcc(Assembler::carryClear, FOUND_CHAR);
7764     addptr(result, 32);
7765     subl(tmp, 2*stride);
7766     jccb(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
7767     jmp(SCAN_TO_8_CHAR);
7768     bind(SCAN_TO_8_CHAR_INIT);
7769     movdl(vec1, ch);
7770     pshuflw(vec1, vec1, 0x00);
7771     pshufd(vec1, vec1, 0);
7772     pxor(vec2, vec2);
7773   }
7774   bind(SCAN_TO_8_CHAR);
7775   cmpl(cnt1, stride);
7776   if (UseAVX >= 2) {
7777     jccb(Assembler::less, SCAN_TO_CHAR);
7778   } else {
7779     jccb(Assembler::less, SCAN_TO_CHAR_LOOP);
7780     movdl(vec1, ch);
7781     pshuflw(vec1, vec1, 0x00);
7782     pshufd(vec1, vec1, 0);
7783     pxor(vec2, vec2);
7784   }
7785   movl(tmp, cnt1);
7786   andl(tmp, 0xFFFFFFF8);  //vector count (in chars)
7787   andl(cnt1,0x00000007);  //tail count (in chars)
7788 
7789   bind(SCAN_TO_8_CHAR_LOOP);
7790   movdqu(vec3, Address(result, 0));
7791   pcmpeqw(vec3, vec1);
7792   ptest(vec2, vec3);
7793   jcc(Assembler::carryClear, FOUND_CHAR);
7794   addptr(result, 16);
7795   subl(tmp, stride);
7796   jccb(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
7797   bind(SCAN_TO_CHAR);
7798   testl(cnt1, cnt1);
7799   jcc(Assembler::zero, RET_NOT_FOUND);
7800   bind(SCAN_TO_CHAR_LOOP);
7801   load_unsigned_short(tmp, Address(result, 0));
7802   cmpl(ch, tmp);
7803   jccb(Assembler::equal, FOUND_SEQ_CHAR);
7804   addptr(result, 2);
7805   subl(cnt1, 1);
7806   jccb(Assembler::zero, RET_NOT_FOUND);
7807   jmp(SCAN_TO_CHAR_LOOP);
7808 
7809   bind(RET_NOT_FOUND);
7810   movl(result, -1);
7811   jmpb(DONE_LABEL);
7812 
7813   bind(FOUND_CHAR);
7814   if (UseAVX >= 2) {
7815     vpmovmskb(tmp, vec3);
7816   } else {
7817     pmovmskb(tmp, vec3);
7818   }
7819   bsfl(ch, tmp);
7820   addl(result, ch);
7821 
7822   bind(FOUND_SEQ_CHAR);
7823   subptr(result, str1);
7824   shrl(result, 1);
7825 
7826   bind(DONE_LABEL);
7827 } // string_indexof_char
7828 
7829 // helper function for string_compare
7830 void MacroAssembler::load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
7831                                         Address::ScaleFactor scale, Address::ScaleFactor scale1,
7832                                         Address::ScaleFactor scale2, Register index, int ae) {
7833   if (ae == StrIntrinsicNode::LL) {
7834     load_unsigned_byte(elem1, Address(str1, index, scale, 0));
7835     load_unsigned_byte(elem2, Address(str2, index, scale, 0));
7836   } else if (ae == StrIntrinsicNode::UU) {
7837     load_unsigned_short(elem1, Address(str1, index, scale, 0));
7838     load_unsigned_short(elem2, Address(str2, index, scale, 0));
7839   } else {
7840     load_unsigned_byte(elem1, Address(str1, index, scale1, 0));
7841     load_unsigned_short(elem2, Address(str2, index, scale2, 0));
7842   }
7843 }
7844 
7845 // Compare strings, used for char[] and byte[].
7846 void MacroAssembler::string_compare(Register str1, Register str2,
7847                                     Register cnt1, Register cnt2, Register result,
7848                                     XMMRegister vec1, int ae) {
7849   ShortBranchVerifier sbv(this);
7850   Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
7851   Label COMPARE_WIDE_VECTORS_LOOP_FAILED;  // used only _LP64 && AVX3
7852   int stride, stride2, adr_stride, adr_stride1, adr_stride2;
7853   int stride2x2 = 0x40;
7854   Address::ScaleFactor scale, scale1, scale2;
7855 
7856   if (ae != StrIntrinsicNode::LL) {
7857     stride2x2 = 0x20;
7858   }
7859 
7860   if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
7861     shrl(cnt2, 1);
7862   }
7863   // Compute the minimum of the string lengths and the
7864   // difference of the string lengths (stack).
7865   // Do the conditional move stuff
7866   movl(result, cnt1);
7867   subl(cnt1, cnt2);
7868   push(cnt1);
7869   cmov32(Assembler::lessEqual, cnt2, result);    // cnt2 = min(cnt1, cnt2)
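  // e.g. cnt1 == 5, cnt2 == 9: the pushed difference is 5 - 9 == -4 and
  // cnt2 becomes min(5, 9) == 5, the number of elements to compare.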
7870 
7871   // Is the minimum length zero?
7872   testl(cnt2, cnt2);
7873   jcc(Assembler::zero, LENGTH_DIFF_LABEL);
7874   if (ae == StrIntrinsicNode::LL) {
7875     // Load first bytes
7876     load_unsigned_byte(result, Address(str1, 0));  // result = str1[0]
7877     load_unsigned_byte(cnt1, Address(str2, 0));    // cnt1   = str2[0]
7878   } else if (ae == StrIntrinsicNode::UU) {
7879     // Load first characters
7880     load_unsigned_short(result, Address(str1, 0));
7881     load_unsigned_short(cnt1, Address(str2, 0));
7882   } else {
7883     load_unsigned_byte(result, Address(str1, 0));
7884     load_unsigned_short(cnt1, Address(str2, 0));
7885   }
7886   subl(result, cnt1);
7887   jcc(Assembler::notZero,  POP_LABEL);
7888 
7889   if (ae == StrIntrinsicNode::UU) {
7890     // Divide length by 2 to get number of chars
7891     shrl(cnt2, 1);
7892   }
7893   cmpl(cnt2, 1);
7894   jcc(Assembler::equal, LENGTH_DIFF_LABEL);
7895 
7896   // Check if the strings start at the same location and setup scale and stride
7897   if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7898     cmpptr(str1, str2);
7899     jcc(Assembler::equal, LENGTH_DIFF_LABEL);
7900     if (ae == StrIntrinsicNode::LL) {
7901       scale = Address::times_1;
7902       stride = 16;
7903     } else {
7904       scale = Address::times_2;
7905       stride = 8;
7906     }
7907   } else {
7908     scale = Address::no_scale;  // not used
7909     scale1 = Address::times_1;
7910     scale2 = Address::times_2;
7911     stride = 8;
7912   }
7913 
7914   if (UseAVX >= 2 && UseSSE42Intrinsics) {
7915     assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
7916     Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
7917     Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
7918     Label COMPARE_WIDE_VECTORS_LOOP_AVX2;
7919     Label COMPARE_TAIL_LONG;
7920     Label COMPARE_WIDE_VECTORS_LOOP_AVX3;  // used only _LP64 && AVX3
7921 
7922     int pcmpmask = 0x19;
7923     if (ae == StrIntrinsicNode::LL) {
7924       pcmpmask &= ~0x01;
7925     }
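    // pcmpmask illustration: 0x19 = 0b011001 encodes unsigned words
    // (bits[1:0] = 01), "equal each" string compare (bits[3:2] = 10) and
    // negated result (bits[5:4] = 01); clearing bit 0 switches to bytes.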
7926 
7927     // Setup to compare 16-chars (32-bytes) vectors,
7928     // start from first character again because it has aligned address.
7929     if (ae == StrIntrinsicNode::LL) {
7930       stride2 = 32;
7931     } else {
7932       stride2 = 16;
7933     }
7934     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7935       adr_stride = stride << scale;
7936     } else {
7937       adr_stride1 = 8;  //stride << scale1;
7938       adr_stride2 = 16; //stride << scale2;
7939     }
7940 
7941     assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
7942     // rax and rdx are used by pcmpestri as elements counters
7943     movl(result, cnt2);
7944     andl(cnt2, ~(stride2-1));   // cnt2 holds the vector count
7945     jcc(Assembler::zero, COMPARE_TAIL_LONG);
7946 
7947     // fast path : compare first 2 8-char vectors.
7948     bind(COMPARE_16_CHARS);
7949     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7950       movdqu(vec1, Address(str1, 0));
7951     } else {
7952       pmovzxbw(vec1, Address(str1, 0));
7953     }
7954     pcmpestri(vec1, Address(str2, 0), pcmpmask);
7955     jccb(Assembler::below, COMPARE_INDEX_CHAR);
7956 
7957     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7958       movdqu(vec1, Address(str1, adr_stride));
7959       pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
7960     } else {
7961       pmovzxbw(vec1, Address(str1, adr_stride1));
7962       pcmpestri(vec1, Address(str2, adr_stride2), pcmpmask);
7963     }
7964     jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
7965     addl(cnt1, stride);
7966 
7967     // Compare the characters at index in cnt1
7968     bind(COMPARE_INDEX_CHAR); // cnt1 has the offset of the mismatching character
7969     load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae);
7970     subl(result, cnt2);
7971     jmp(POP_LABEL);
7972 
7973     // Setup the registers to start vector comparison loop
7974     bind(COMPARE_WIDE_VECTORS);
7975     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7976       lea(str1, Address(str1, result, scale));
7977       lea(str2, Address(str2, result, scale));
7978     } else {
7979       lea(str1, Address(str1, result, scale1));
7980       lea(str2, Address(str2, result, scale2));
7981     }
7982     subl(result, stride2);
7983     subl(cnt2, stride2);
7984     jcc(Assembler::zero, COMPARE_WIDE_TAIL);
7985     negptr(result);
7986 
7987     //  In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
7988     bind(COMPARE_WIDE_VECTORS_LOOP);
7989 
7990 #ifdef _LP64
7991     if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
7992       cmpl(cnt2, stride2x2);
7993       jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
7994       testl(cnt2, stride2x2-1);   // cnt2 holds the vector count
7995       jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX2);   // means we cannot subtract by 0x40
7996 
7997       bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
7998       if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
7999         evmovdquq(vec1, Address(str1, result, scale), Assembler::AVX_512bit);
8000         evpcmpeqb(k7, vec1, Address(str2, result, scale), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0
8001       } else {
8002         vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_512bit);
8003         evpcmpeqb(k7, vec1, Address(str2, result, scale2), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0
8004       }
8005       kortestql(k7, k7);
8006       jcc(Assembler::aboveEqual, COMPARE_WIDE_VECTORS_LOOP_FAILED);     // miscompare
8007       addptr(result, stride2x2);  // update since we already compared at this addr
8008       subl(cnt2, stride2x2);      // and sub the size too
8009       jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX3);
8010 
8011       vpxor(vec1, vec1);
8012       jmpb(COMPARE_WIDE_TAIL);
8013     }//if (VM_Version::supports_avx512vlbw())
8014 #endif // _LP64
8015 
8016 
8017     bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);
8018     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8019       vmovdqu(vec1, Address(str1, result, scale));
8020       vpxor(vec1, Address(str2, result, scale));
8021     } else {
8022       vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_256bit);
8023       vpxor(vec1, Address(str2, result, scale2));
8024     }
8025     vptest(vec1, vec1);
8026     jcc(Assembler::notZero, VECTOR_NOT_EQUAL);
8027     addptr(result, stride2);
8028     subl(cnt2, stride2);
8029     jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
8030     // clean upper bits of YMM registers
8031     vpxor(vec1, vec1);
8032 
8033     // compare wide vectors tail
8034     bind(COMPARE_WIDE_TAIL);
8035     testptr(result, result);
8036     jccb(Assembler::zero, LENGTH_DIFF_LABEL);
8037 
8038     movl(result, stride2);
8039     movl(cnt2, result);
8040     negptr(result);
8041     jmp(COMPARE_WIDE_VECTORS_LOOP_AVX2);
8042 
8043     // Identifies the mismatching (higher or lower) 16 bytes in the 32-byte vectors.
8044     bind(VECTOR_NOT_EQUAL);
8045     // clean upper bits of YMM registers
8046     vpxor(vec1, vec1);
8047     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8048       lea(str1, Address(str1, result, scale));
8049       lea(str2, Address(str2, result, scale));
8050     } else {
8051       lea(str1, Address(str1, result, scale1));
8052       lea(str2, Address(str2, result, scale2));
8053     }
8054     jmp(COMPARE_16_CHARS);
8055 
8056     // Compare tail chars, length between 1 and 15 chars
8057     bind(COMPARE_TAIL_LONG);
8058     movl(cnt2, result);
8059     cmpl(cnt2, stride);
8060     jccb(Assembler::less, COMPARE_SMALL_STR);
8061 
8062     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8063       movdqu(vec1, Address(str1, 0));
8064     } else {
8065       pmovzxbw(vec1, Address(str1, 0));
8066     }
8067     pcmpestri(vec1, Address(str2, 0), pcmpmask);
8068     jcc(Assembler::below, COMPARE_INDEX_CHAR);
8069     subptr(cnt2, stride);
8070     jccb(Assembler::zero, LENGTH_DIFF_LABEL);
8071     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8072       lea(str1, Address(str1, result, scale));
8073       lea(str2, Address(str2, result, scale));
8074     } else {
8075       lea(str1, Address(str1, result, scale1));
8076       lea(str2, Address(str2, result, scale2));
8077     }
8078     negptr(cnt2);
8079     jmpb(WHILE_HEAD_LABEL);
8080 
8081     bind(COMPARE_SMALL_STR);
8082   } else if (UseSSE42Intrinsics) {
8083     assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
8084     Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
8085     int pcmpmask = 0x19;
8086     // Setup to compare 8-char (16-byte) vectors,
8087     // start from first character again because it has aligned address.
8088     movl(result, cnt2);
8089     andl(cnt2, ~(stride - 1));   // cnt2 holds the vector count
8090     if (ae == StrIntrinsicNode::LL) {
8091       pcmpmask &= ~0x01;
8092     }
8093     jccb(Assembler::zero, COMPARE_TAIL);
8094     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8095       lea(str1, Address(str1, result, scale));
8096       lea(str2, Address(str2, result, scale));
8097     } else {
8098       lea(str1, Address(str1, result, scale1));
8099       lea(str2, Address(str2, result, scale2));
8100     }
8101     negptr(result);
8102 
8103     // pcmpestri
8104     //   inputs:
8105     //     vec1- substring
8106     //     rax - negative string length (elements count)
8107     //     mem - scanned string
8108     //     rdx - string length (elements count)
8109     //     pcmpmask - cmp mode: 11000 (string compare with negated result)
8110     //               + 00 (unsigned bytes) or  + 01 (unsigned shorts)
8111     //   outputs:
8112     //     rcx - first mismatched element index
8113     assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
8114 
8115     bind(COMPARE_WIDE_VECTORS);
8116     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8117       movdqu(vec1, Address(str1, result, scale));
8118       pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
8119     } else {
8120       pmovzxbw(vec1, Address(str1, result, scale1));
8121       pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
8122     }
8123     // After pcmpestri cnt1(rcx) contains mismatched element index
8124 
8125     jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
8126     addptr(result, stride);
8127     subptr(cnt2, stride);
8128     jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
8129 
8130     // compare wide vectors tail
8131     testptr(result, result);
8132     jccb(Assembler::zero, LENGTH_DIFF_LABEL);
8133 
8134     movl(cnt2, stride);
8135     movl(result, stride);
8136     negptr(result);
8137     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8138       movdqu(vec1, Address(str1, result, scale));
8139       pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
8140     } else {
8141       pmovzxbw(vec1, Address(str1, result, scale1));
8142       pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
8143     }
8144     jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
8145 
8146     // Mismatched characters in the vectors
8147     bind(VECTOR_NOT_EQUAL);
8148     addptr(cnt1, result);
8149     load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae);
8150     subl(result, cnt2);
8151     jmpb(POP_LABEL);
8152 
8153     bind(COMPARE_TAIL); // limit is zero
8154     movl(cnt2, result);
8155     // Fallthru to tail compare
8156   }
8157   // Shift str2 and str1 to the end of the arrays, negate min
8158   if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8159     lea(str1, Address(str1, cnt2, scale));
8160     lea(str2, Address(str2, cnt2, scale));
8161   } else {
8162     lea(str1, Address(str1, cnt2, scale1));
8163     lea(str2, Address(str2, cnt2, scale2));
8164   }
8165   decrementl(cnt2);  // first character was compared already
8166   negptr(cnt2);
8167 
8168   // Compare the rest of the elements
8169   bind(WHILE_HEAD_LABEL);
8170   load_next_elements(result, cnt1, str1, str2, scale, scale1, scale2, cnt2, ae);
8171   subl(result, cnt1);
8172   jccb(Assembler::notZero, POP_LABEL);
8173   increment(cnt2);
8174   jccb(Assembler::notZero, WHILE_HEAD_LABEL);
8175 
8176   // Strings are equal up to min length.  Return the length difference.
8177   bind(LENGTH_DIFF_LABEL);
8178   pop(result);
8179   if (ae == StrIntrinsicNode::UU) {
8180     // Divide diff by 2 to get number of chars
8181     sarl(result, 1);
8182   }
8183   jmpb(DONE_LABEL);
8184 
8185 #ifdef _LP64
8186   if (VM_Version::supports_avx512vlbw()) {
8187 
8188     bind(COMPARE_WIDE_VECTORS_LOOP_FAILED);
8189 
8190     kmovql(cnt1, k7);
8191     notq(cnt1);
8192     bsfq(cnt2, cnt1);
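    // k7 has a 1 bit per matching byte position; inverting and bit-scanning
    // forward yields the offset of the first mismatch within this stride.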
8193     if (ae != StrIntrinsicNode::LL) {
8194       // Divide diff by 2 to get number of chars
8195       sarl(cnt2, 1);
8196     }
8197     addq(result, cnt2);
8198     if (ae == StrIntrinsicNode::LL) {
8199       load_unsigned_byte(cnt1, Address(str2, result));
8200       load_unsigned_byte(result, Address(str1, result));
8201     } else if (ae == StrIntrinsicNode::UU) {
8202       load_unsigned_short(cnt1, Address(str2, result, scale));
8203       load_unsigned_short(result, Address(str1, result, scale));
8204     } else {
8205       load_unsigned_short(cnt1, Address(str2, result, scale2));
8206       load_unsigned_byte(result, Address(str1, result, scale1));
8207     }
8208     subl(result, cnt1);
8209     jmpb(POP_LABEL);
8210   }//if (VM_Version::supports_avx512vlbw())
8211 #endif // _LP64
8212 
8213   // Discard the stored length difference
8214   bind(POP_LABEL);
8215   pop(cnt1);
8216 
8217   // That's it
8218   bind(DONE_LABEL);
8219   if(ae == StrIntrinsicNode::UL) {
8220     negl(result);
8221   }
8222 
8223 }
8224 
8225 // Search for a non-ASCII character (negative byte value) in a byte array;
8226 // return true if one is found and false otherwise.
8227 void MacroAssembler::has_negatives(Register ary1, Register len,
8228                                    Register result, Register tmp1,
8229                                    XMMRegister vec1, XMMRegister vec2) {
8230 
8231   // rsi: byte array
8232   // rcx: len
8233   // rax: result
8234   ShortBranchVerifier sbv(this);
8235   assert_different_registers(ary1, len, result, tmp1);
8236   assert_different_registers(vec1, vec2);
8237   Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_CHAR, COMPARE_VECTORS, COMPARE_BYTE;
8238 
8239   // len == 0
8240   testl(len, len);
8241   jcc(Assembler::zero, FALSE_LABEL);
8242 
8243   movl(result, len); // copy
8244 
8245   if (UseAVX >= 2 && UseSSE >= 2) {
8246     // With AVX2, use 32-byte vector compare
8247     Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
8248 
8249     // Compare 32-byte vectors
8250     andl(result, 0x0000001f);  //   tail count (in bytes)
8251     andl(len, 0xffffffe0);   // vector count (in bytes)
8252     jccb(Assembler::zero, COMPARE_TAIL);
8253 
8254     lea(ary1, Address(ary1, len, Address::times_1));
8255     negptr(len);
8256 
8257     movl(tmp1, 0x80808080);   // create mask to test for Unicode chars in vector
8258     movdl(vec2, tmp1);
8259     vpbroadcastd(vec2, vec2);
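    // Every byte of vec2 is now 0x80, so vptest's ZF is set only when no
    // scanned byte has its sign bit set; e.g. 0x41 ('A') passes as ASCII
    // while 0xc3 (a UTF-8 lead byte) trips the test.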
8260 
8261     bind(COMPARE_WIDE_VECTORS);
8262     vmovdqu(vec1, Address(ary1, len, Address::times_1));
8263     vptest(vec1, vec2);
8264     jccb(Assembler::notZero, TRUE_LABEL);
8265     addptr(len, 32);
8266     jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
8267 
8268     testl(result, result);
8269     jccb(Assembler::zero, FALSE_LABEL);
8270 
8271     vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
8272     vptest(vec1, vec2);
8273     jccb(Assembler::notZero, TRUE_LABEL);
8274     jmpb(FALSE_LABEL);
8275 
8276     bind(COMPARE_TAIL); // len is zero
8277     movl(len, result);
8278     // Fallthru to tail compare
8279   } else if (UseSSE42Intrinsics) {
8280     assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
8281     // With SSE4.2, use double quad vector compare
8282     Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
8283 
8284     // Compare 16-byte vectors
8285     andl(result, 0x0000000f);  //   tail count (in bytes)
8286     andl(len, 0xfffffff0);   // vector count (in bytes)
8287     jccb(Assembler::zero, COMPARE_TAIL);
8288 
8289     lea(ary1, Address(ary1, len, Address::times_1));
8290     negptr(len);
8291 
8292     movl(tmp1, 0x80808080);
8293     movdl(vec2, tmp1);
8294     pshufd(vec2, vec2, 0);
8295 
8296     bind(COMPARE_WIDE_VECTORS);
8297     movdqu(vec1, Address(ary1, len, Address::times_1));
8298     ptest(vec1, vec2);
8299     jccb(Assembler::notZero, TRUE_LABEL);
8300     addptr(len, 16);
8301     jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
8302 
8303     testl(result, result);
8304     jccb(Assembler::zero, FALSE_LABEL);
8305 
8306     movdqu(vec1, Address(ary1, result, Address::times_1, -16));
8307     ptest(vec1, vec2);
8308     jccb(Assembler::notZero, TRUE_LABEL);
8309     jmpb(FALSE_LABEL);
8310 
8311     bind(COMPARE_TAIL); // len is zero
8312     movl(len, result);
8313     // Fallthru to tail compare
8314   }
8315 
8316   // Compare 4-byte vectors
8317   andl(len, 0xfffffffc); // vector count (in bytes)
8318   jccb(Assembler::zero, COMPARE_CHAR);
8319 
8320   lea(ary1, Address(ary1, len, Address::times_1));
8321   negptr(len);
8322 
8323   bind(COMPARE_VECTORS);
8324   movl(tmp1, Address(ary1, len, Address::times_1));
8325   andl(tmp1, 0x80808080);
8326   jccb(Assembler::notZero, TRUE_LABEL);
8327   addptr(len, 4);
8328   jcc(Assembler::notZero, COMPARE_VECTORS);
8329 
8330   // Compare trailing char (final 2 bytes), if any
8331   bind(COMPARE_CHAR);
8332   testl(result, 0x2);   // tail  char
8333   jccb(Assembler::zero, COMPARE_BYTE);
8334   load_unsigned_short(tmp1, Address(ary1, 0));
8335   andl(tmp1, 0x00008080);
8336   jccb(Assembler::notZero, TRUE_LABEL);
8337   subptr(result, 2);
8338   lea(ary1, Address(ary1, 2));
8339 
8340   bind(COMPARE_BYTE);
8341   testl(result, 0x1);   // tail  byte
8342   jccb(Assembler::zero, FALSE_LABEL);
8343   load_unsigned_byte(tmp1, Address(ary1, 0));
8344   andl(tmp1, 0x00000080);
8345   jccb(Assembler::notEqual, TRUE_LABEL);
8346   jmpb(FALSE_LABEL);
8347 
8348   bind(TRUE_LABEL);
8349   movl(result, 1);   // return true
8350   jmpb(DONE);
8351 
8352   bind(FALSE_LABEL);
8353   xorl(result, result); // return false
8354 
8355   // That's it
8356   bind(DONE);
8357   if (UseAVX >= 2 && UseSSE >= 2) {
8358     // clean upper bits of YMM registers
8359     vpxor(vec1, vec1);
8360     vpxor(vec2, vec2);
8361   }
8362 }
8363 
8364 // Compare char[] or byte[] arrays (aligned to 4 bytes) or substrings.
8365 void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ary2,
8366                                    Register limit, Register result, Register chr,
8367                                    XMMRegister vec1, XMMRegister vec2, bool is_char) {
8368   ShortBranchVerifier sbv(this);
8369   Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR, COMPARE_BYTE;
8370 
8371   int length_offset  = arrayOopDesc::length_offset_in_bytes();
8372   int base_offset    = arrayOopDesc::base_offset_in_bytes(is_char ? T_CHAR : T_BYTE);
8373 
8374   if (is_array_equ) {
8375     // Check the input args
8376     cmpptr(ary1, ary2);
8377     jcc(Assembler::equal, TRUE_LABEL);
8378 
8379     // Need additional checks for arrays_equals.
8380     testptr(ary1, ary1);
8381     jcc(Assembler::zero, FALSE_LABEL);
8382     testptr(ary2, ary2);
8383     jcc(Assembler::zero, FALSE_LABEL);
8384 
8385     // Check the lengths
8386     movl(limit, Address(ary1, length_offset));
8387     cmpl(limit, Address(ary2, length_offset));
8388     jcc(Assembler::notEqual, FALSE_LABEL);
8389   }
8390 
8391   // count == 0
8392   testl(limit, limit);
8393   jcc(Assembler::zero, TRUE_LABEL);
8394 
8395   if (is_array_equ) {
8396     // Load array address
8397     lea(ary1, Address(ary1, base_offset));
8398     lea(ary2, Address(ary2, base_offset));
8399   }
8400 
8401   if (is_array_equ && is_char) {
8402     // arrays_equals when used for char[].
8403     shll(limit, 1);      // byte count != 0
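    // e.g. a char[] of length 7 yields limit == 14 bytes to compare.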
8404   }
8405   movl(result, limit); // copy
8406 
8407   if (UseAVX >= 2) {
8408     // With AVX2, use 32-byte vector compare
8409     Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
8410 
8411     // Compare 32-byte vectors
8412     andl(result, 0x0000001f);  //   tail count (in bytes)
8413     andl(limit, 0xffffffe0);   // vector count (in bytes)
8414     jcc(Assembler::zero, COMPARE_TAIL);
8415 
8416     lea(ary1, Address(ary1, limit, Address::times_1));
8417     lea(ary2, Address(ary2, limit, Address::times_1));
8418     negptr(limit);
8419 
8420     bind(COMPARE_WIDE_VECTORS);
8421 
8422 #ifdef _LP64
8423     if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
8424       Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;
8425 
8426       cmpl(limit, -64);
8427       jccb(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
8428 
8429       bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
8430 
8431       evmovdquq(vec1, Address(ary1, limit, Address::times_1), Assembler::AVX_512bit);
8432       evpcmpeqb(k7, vec1, Address(ary2, limit, Address::times_1), Assembler::AVX_512bit);
8433       kortestql(k7, k7);
8434       jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare
8435       addptr(limit, 64);  // update since we already compared at this addr
8436       cmpl(limit, -64);
8437       jccb(Assembler::lessEqual, COMPARE_WIDE_VECTORS_LOOP_AVX3);
8438 
8439       // At this point we may still need to compare -limit+result bytes.
8440       // We could execute the next two instructions and just continue via the non-wide path:
8441       //  cmpl(limit, 0);
8442       //  jcc(Assembler::equal, COMPARE_TAIL);  // true
8443       // But since we stopped at the points ary{1,2}+limit which are
8444       // not farther than 64 bytes from the ends of arrays ary{1,2}+result
8445       // (|limit| <= 32 and result < 32),
8446       // we may just compare the last 64 bytes.
8447       //
8448       addptr(result, -64);   // it is safe, because we just came from this area
8449       evmovdquq(vec1, Address(ary1, result, Address::times_1), Assembler::AVX_512bit);
8450       evpcmpeqb(k7, vec1, Address(ary2, result, Address::times_1), Assembler::AVX_512bit);
8451       kortestql(k7, k7);
8452       jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare
8453 
8454       jmp(TRUE_LABEL);
8455 
8456       bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);
8457 
8458     }//if (VM_Version::supports_avx512vlbw())
8459 #endif //_LP64
8460 
8461     vmovdqu(vec1, Address(ary1, limit, Address::times_1));
8462     vmovdqu(vec2, Address(ary2, limit, Address::times_1));
8463     vpxor(vec1, vec2);
8464 
8465     vptest(vec1, vec1);
8466     jccb(Assembler::notZero, FALSE_LABEL);
8467     addptr(limit, 32);
8468     jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
8469 
8470     testl(result, result);
8471     jccb(Assembler::zero, TRUE_LABEL);
8472 
8473     vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
8474     vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
8475     vpxor(vec1, vec2);
8476 
8477     vptest(vec1, vec1);
8478     jccb(Assembler::notZero, FALSE_LABEL);
8479     jmpb(TRUE_LABEL);
8480 
8481     bind(COMPARE_TAIL); // limit is zero
8482     movl(limit, result);
8483     // Fallthru to tail compare
8484   } else if (UseSSE42Intrinsics) {
8485     assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
8486     // With SSE4.2, use double quad vector compare
8487     Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
8488 
8489     // Compare 16-byte vectors
8490     andl(result, 0x0000000f);  //   tail count (in bytes)
8491     andl(limit, 0xfffffff0);   // vector count (in bytes)
8492     jccb(Assembler::zero, COMPARE_TAIL);
8493 
8494     lea(ary1, Address(ary1, limit, Address::times_1));
8495     lea(ary2, Address(ary2, limit, Address::times_1));
8496     negptr(limit);
8497 
8498     bind(COMPARE_WIDE_VECTORS);
8499     movdqu(vec1, Address(ary1, limit, Address::times_1));
8500     movdqu(vec2, Address(ary2, limit, Address::times_1));
8501     pxor(vec1, vec2);
8502 
8503     ptest(vec1, vec1);
8504     jccb(Assembler::notZero, FALSE_LABEL);
8505     addptr(limit, 16);
8506     jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
8507 
8508     testl(result, result);
8509     jccb(Assembler::zero, TRUE_LABEL);
8510 
8511     movdqu(vec1, Address(ary1, result, Address::times_1, -16));
8512     movdqu(vec2, Address(ary2, result, Address::times_1, -16));
8513     pxor(vec1, vec2);
8514 
8515     ptest(vec1, vec1);
8516     jccb(Assembler::notZero, FALSE_LABEL);
8517     jmpb(TRUE_LABEL);
8518 
8519     bind(COMPARE_TAIL); // limit is zero
8520     movl(limit, result);
8521     // Fallthru to tail compare
8522   }
8523 
8524   // Compare 4-byte vectors
8525   andl(limit, 0xfffffffc); // vector count (in bytes)
8526   jccb(Assembler::zero, COMPARE_CHAR);
8527 
8528   lea(ary1, Address(ary1, limit, Address::times_1));
8529   lea(ary2, Address(ary2, limit, Address::times_1));
8530   negptr(limit);
8531 
8532   bind(COMPARE_VECTORS);
8533   movl(chr, Address(ary1, limit, Address::times_1));
8534   cmpl(chr, Address(ary2, limit, Address::times_1));
8535   jccb(Assembler::notEqual, FALSE_LABEL);
8536   addptr(limit, 4);
8537   jcc(Assembler::notZero, COMPARE_VECTORS);
8538 
8539   // Compare trailing char (final 2 bytes), if any
8540   bind(COMPARE_CHAR);
8541   testl(result, 0x2);   // tail  char
8542   jccb(Assembler::zero, COMPARE_BYTE);
8543   load_unsigned_short(chr, Address(ary1, 0));
8544   load_unsigned_short(limit, Address(ary2, 0));
8545   cmpl(chr, limit);
8546   jccb(Assembler::notEqual, FALSE_LABEL);
8547 
8548   if (is_array_equ && is_char) {
8549     bind(COMPARE_BYTE);
8550   } else {
8551     lea(ary1, Address(ary1, 2));
8552     lea(ary2, Address(ary2, 2));
8553 
8554     bind(COMPARE_BYTE);
8555     testl(result, 0x1);   // tail  byte
8556     jccb(Assembler::zero, TRUE_LABEL);
8557     load_unsigned_byte(chr, Address(ary1, 0));
8558     load_unsigned_byte(limit, Address(ary2, 0));
8559     cmpl(chr, limit);
8560     jccb(Assembler::notEqual, FALSE_LABEL);
8561   }
8562   bind(TRUE_LABEL);
8563   movl(result, 1);   // return true
8564   jmpb(DONE);
8565 
8566   bind(FALSE_LABEL);
8567   xorl(result, result); // return false
8568 
8569   // That's it
8570   bind(DONE);
8571   if (UseAVX >= 2) {
8572     // clean upper bits of YMM registers
8573     vpxor(vec1, vec1);
8574     vpxor(vec2, vec2);
8575   }
8576 }
8577 
8578 #endif
8579 
8580 void MacroAssembler::generate_fill(BasicType t, bool aligned,
8581                                    Register to, Register value, Register count,
8582                                    Register rtmp, XMMRegister xtmp) {
8583   ShortBranchVerifier sbv(this);
8584   assert_different_registers(to, value, count, rtmp);
8585   Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
8586   Label L_fill_2_bytes, L_fill_4_bytes;
8587 
8588   int shift = -1;
8589   switch (t) {
8590     case T_BYTE:
8591       shift = 2;
8592       break;
8593     case T_SHORT:
8594       shift = 1;
8595       break;
8596     case T_INT:
8597       shift = 0;
8598       break;
8599     default: ShouldNotReachHere();
8600   }
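  // count is in elements; shift converts element counts into the byte-based
  // strides below, e.g. for T_SHORT (shift == 1) "8 << shift" is 16 elements,
  // i.e. 32 bytes per loop iteration.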
8601 
8602   if (t == T_BYTE) {
8603     andl(value, 0xff);
8604     movl(rtmp, value);
8605     shll(rtmp, 8);
8606     orl(value, rtmp);
8607   }
8608   if (t == T_SHORT) {
8609     andl(value, 0xffff);
8610   }
8611   if (t == T_BYTE || t == T_SHORT) {
8612     movl(rtmp, value);
8613     shll(rtmp, 16);
8614     orl(value, rtmp);
8615   }
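  // value now holds the fill pattern replicated across 32 bits,
  // e.g. byte 0xab -> 0xabababab, short 0x1234 -> 0x12341234.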
8616 
8617   cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
8618   jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
8619   if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
8620     // align source address at 4 bytes address boundary
8621     if (t == T_BYTE) {
8622       // One byte misalignment happens only for byte arrays
8623       testptr(to, 1);
8624       jccb(Assembler::zero, L_skip_align1);
8625       movb(Address(to, 0), value);
8626       increment(to);
8627       decrement(count);
8628       BIND(L_skip_align1);
8629     }
8630     // Two bytes misalignment happens only for byte and short (char) arrays
8631     testptr(to, 2);
8632     jccb(Assembler::zero, L_skip_align2);
8633     movw(Address(to, 0), value);
8634     addptr(to, 2);
8635     subl(count, 1<<(shift-1));
8636     BIND(L_skip_align2);
8637   }
8638   if (UseSSE < 2) {
8639     Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
8640     // Fill 32-byte chunks
8641     subl(count, 8 << shift);
8642     jcc(Assembler::less, L_check_fill_8_bytes);
8643     align(16);
8644 
8645     BIND(L_fill_32_bytes_loop);
8646 
8647     for (int i = 0; i < 32; i += 4) {
8648       movl(Address(to, i), value);
8649     }
8650 
8651     addptr(to, 32);
8652     subl(count, 8 << shift);
8653     jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
8654     BIND(L_check_fill_8_bytes);
8655     addl(count, 8 << shift);
8656     jccb(Assembler::zero, L_exit);
8657     jmpb(L_fill_8_bytes);
8658 
8659     //
8660     // length is too short, just fill qwords
8661     //
8662     BIND(L_fill_8_bytes_loop);
8663     movl(Address(to, 0), value);
8664     movl(Address(to, 4), value);
8665     addptr(to, 8);
8666     BIND(L_fill_8_bytes);
8667     subl(count, 1 << (shift + 1));
8668     jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
8669     // fall through to fill 4 bytes
8670   } else {
8671     Label L_fill_32_bytes;
8672     if (!UseUnalignedLoadStores) {
8673       // align to 8 bytes, we know we are 4 byte aligned to start
8674       testptr(to, 4);
8675       jccb(Assembler::zero, L_fill_32_bytes);
8676       movl(Address(to, 0), value);
8677       addptr(to, 4);
8678       subl(count, 1<<shift);
8679     }
8680     BIND(L_fill_32_bytes);
8681     {
8682       assert(UseSSE >= 2, "supported cpu only");
8683       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
8684       if (UseAVX > 2) {
8685         movl(rtmp, 0xffff);
8686         kmovwl(k1, rtmp);
8687       }
8688       movdl(xtmp, value);
8689       if (UseAVX > 2 && UseUnalignedLoadStores) {
8690         // Fill 64-byte chunks
8691         Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
8692         evpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
8693 
8694         subl(count, 16 << shift);
8695         jcc(Assembler::less, L_check_fill_32_bytes);
8696         align(16);
8697 
8698         BIND(L_fill_64_bytes_loop);
8699         evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
8700         addptr(to, 64);
8701         subl(count, 16 << shift);
8702         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
8703 
8704         BIND(L_check_fill_32_bytes);
8705         addl(count, 8 << shift);
8706         jccb(Assembler::less, L_check_fill_8_bytes);
8707         vmovdqu(Address(to, 0), xtmp);
8708         addptr(to, 32);
8709         subl(count, 8 << shift);
8710 
8711         BIND(L_check_fill_8_bytes);
8712       } else if (UseAVX == 2 && UseUnalignedLoadStores) {
8713         // Fill 64-byte chunks
8714         Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
8715         vpbroadcastd(xtmp, xtmp);
8716 
8717         subl(count, 16 << shift);
8718         jcc(Assembler::less, L_check_fill_32_bytes);
8719         align(16);
8720 
8721         BIND(L_fill_64_bytes_loop);
8722         vmovdqu(Address(to, 0), xtmp);
8723         vmovdqu(Address(to, 32), xtmp);
8724         addptr(to, 64);
8725         subl(count, 16 << shift);
8726         jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
8727 
8728         BIND(L_check_fill_32_bytes);
8729         addl(count, 8 << shift);
8730         jccb(Assembler::less, L_check_fill_8_bytes);
8731         vmovdqu(Address(to, 0), xtmp);
8732         addptr(to, 32);
8733         subl(count, 8 << shift);
8734 
8735         BIND(L_check_fill_8_bytes);
8736         // clean upper bits of YMM registers
8737         movdl(xtmp, value);
8738         pshufd(xtmp, xtmp, 0);
8739       } else {
8740         // Fill 32-byte chunks
8741         pshufd(xtmp, xtmp, 0);
8742 
8743         subl(count, 8 << shift);
8744         jcc(Assembler::less, L_check_fill_8_bytes);
8745         align(16);
8746 
8747         BIND(L_fill_32_bytes_loop);
8748 
8749         if (UseUnalignedLoadStores) {
8750           movdqu(Address(to, 0), xtmp);
8751           movdqu(Address(to, 16), xtmp);
8752         } else {
8753           movq(Address(to, 0), xtmp);
8754           movq(Address(to, 8), xtmp);
8755           movq(Address(to, 16), xtmp);
8756           movq(Address(to, 24), xtmp);
8757         }
8758 
8759         addptr(to, 32);
8760         subl(count, 8 << shift);
8761         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
8762 
8763         BIND(L_check_fill_8_bytes);
8764       }
8765       addl(count, 8 << shift);
8766       jccb(Assembler::zero, L_exit);
8767       jmpb(L_fill_8_bytes);
8768 
8769       //
8770       // length is too short, just fill qwords
8771       //
8772       BIND(L_fill_8_bytes_loop);
8773       movq(Address(to, 0), xtmp);
8774       addptr(to, 8);
8775       BIND(L_fill_8_bytes);
8776       subl(count, 1 << (shift + 1));
8777       jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
8778     }
8779   }
8780   // fill trailing 4 bytes
8781   BIND(L_fill_4_bytes);
8782   testl(count, 1<<shift);
8783   jccb(Assembler::zero, L_fill_2_bytes);
8784   movl(Address(to, 0), value);
8785   if (t == T_BYTE || t == T_SHORT) {
8786     addptr(to, 4);
8787     BIND(L_fill_2_bytes);
8788     // fill trailing 2 bytes
8789     testl(count, 1<<(shift-1));
8790     jccb(Assembler::zero, L_fill_byte);
8791     movw(Address(to, 0), value);
8792     if (t == T_BYTE) {
8793       addptr(to, 2);
8794       BIND(L_fill_byte);
8795       // fill trailing byte
8796       testl(count, 1);
8797       jccb(Assembler::zero, L_exit);
8798       movb(Address(to, 0), value);
8799     } else {
8800       BIND(L_fill_byte);
8801     }
8802   } else {
8803     BIND(L_fill_2_bytes);
8804   }
8805   BIND(L_exit);
8806 }
8807 
8808 // encode char[] to byte[] in ISO_8859_1
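// Returns in 'result' the number of leading chars actually encoded: 'len'
// if every char fits in ISO-8859-1, otherwise the index of the first char
// above 0xFF, at which point copying stops.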
8809 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
8810                                       XMMRegister tmp1Reg, XMMRegister tmp2Reg,
8811                                       XMMRegister tmp3Reg, XMMRegister tmp4Reg,
8812                                       Register tmp5, Register result) {
8813   // rsi: src
8814   // rdi: dst
8815   // rdx: len
8816   // rcx: tmp5
8817   // rax: result
8818   ShortBranchVerifier sbv(this);
8819   assert_different_registers(src, dst, len, tmp5, result);
8820   Label L_done, L_copy_1_char, L_copy_1_char_exit;
8821 
8822   // set result
8823   xorl(result, result);
8824   // check for zero length
8825   testl(len, len);
8826   jcc(Assembler::zero, L_done);
8827   movl(result, len);
8828 
8829   // Setup pointers
8830   lea(src, Address(src, len, Address::times_2)); // char[]
8831   lea(dst, Address(dst, len, Address::times_1)); // byte[]
8832   negptr(len);
8833 
8834   if (UseSSE42Intrinsics || UseAVX >= 2) {
8835     assert(UseSSE42Intrinsics ? UseSSE >= 4 : true, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
8836     Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
8837     Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
8838 
8839     if (UseAVX >= 2) {
8840       Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
8841       movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
8842       movdl(tmp1Reg, tmp5);
8843       vpbroadcastd(tmp1Reg, tmp1Reg);
8844       jmpb(L_chars_32_check);
8845 
8846       bind(L_copy_32_chars);
8847       vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
8848       vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
8849       vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
8850       vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in  vector
8851       jccb(Assembler::notZero, L_copy_32_chars_exit);
8852       vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
8853       vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
8854       vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
8855 
8856       bind(L_chars_32_check);
8857       addptr(len, 32);
8858       jccb(Assembler::lessEqual, L_copy_32_chars);
8859 
8860       bind(L_copy_32_chars_exit);
8861       subptr(len, 16);
8862       jccb(Assembler::greater, L_copy_16_chars_exit);
8863 
8864     } else if (UseSSE42Intrinsics) {
8865       movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
8866       movdl(tmp1Reg, tmp5);
8867       pshufd(tmp1Reg, tmp1Reg, 0);
8868       jmpb(L_chars_16_check);
8869     }
8870 
8871     bind(L_copy_16_chars);
8872     if (UseAVX >= 2) {
8873       vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
8874       vptest(tmp2Reg, tmp1Reg);
8875       jccb(Assembler::notZero, L_copy_16_chars_exit);
8876       vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
8877       vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
8878     } else {
8879       if (UseAVX > 0) {
8880         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
8881         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
8882         vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
8883       } else {
8884         movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
8885         por(tmp2Reg, tmp3Reg);
8886         movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
8887         por(tmp2Reg, tmp4Reg);
8888       }
8889       ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in  vector
8890       jccb(Assembler::notZero, L_copy_16_chars_exit);
8891       packuswb(tmp3Reg, tmp4Reg);
8892     }
8893     movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
8894 
8895     bind(L_chars_16_check);
8896     addptr(len, 16);
8897     jccb(Assembler::lessEqual, L_copy_16_chars);
8898 
8899     bind(L_copy_16_chars_exit);
8900     if (UseAVX >= 2) {
8901       // clean upper bits of YMM registers
8902       vpxor(tmp2Reg, tmp2Reg);
8903       vpxor(tmp3Reg, tmp3Reg);
8904       vpxor(tmp4Reg, tmp4Reg);
8905       movdl(tmp1Reg, tmp5);
8906       pshufd(tmp1Reg, tmp1Reg, 0);
8907     }
8908     subptr(len, 8);
8909     jccb(Assembler::greater, L_copy_8_chars_exit);
8910 
8911     bind(L_copy_8_chars);
8912     movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
8913     ptest(tmp3Reg, tmp1Reg);
8914     jccb(Assembler::notZero, L_copy_8_chars_exit);
8915     packuswb(tmp3Reg, tmp1Reg);
8916     movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
8917     addptr(len, 8);
8918     jccb(Assembler::lessEqual, L_copy_8_chars);
8919 
8920     bind(L_copy_8_chars_exit);
8921     subptr(len, 8);
8922     jccb(Assembler::zero, L_done);
8923   }
8924 
8925   bind(L_copy_1_char);
8926   load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
8927   testl(tmp5, 0xff00);      // check if Unicode char
8928   jccb(Assembler::notZero, L_copy_1_char_exit);
8929   movb(Address(dst, len, Address::times_1, 0), tmp5);
8930   addptr(len, 1);
8931   jccb(Assembler::less, L_copy_1_char);
8932 
8933   bind(L_copy_1_char_exit);
8934   addptr(result, len); // len is negative count of not processed elements
8935   bind(L_done);
8936 }
8937 
8938 #ifdef _LP64
8939 /**
8940  * Helper for multiply_to_len().
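 * Computes dest_hi:dest_lo += src1 + src2 as a 128-bit accumulation,
 * propagating the carry of each 64-bit add into dest_hi.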
8941  */
8942 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
8943   addq(dest_lo, src1);
8944   adcq(dest_hi, 0);
8945   addq(dest_lo, src2);
8946   adcq(dest_hi, 0);
8947 }
8948 
8949 /**
8950  * Multiply 64 bit by 64 bit first loop.
8951  */
8952 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
8953                                            Register y, Register y_idx, Register z,
8954                                            Register carry, Register product,
8955                                            Register idx, Register kdx) {
8956   //
8957   //  jlong carry, x[], y[], z[];
8958   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
8959   //    huge_128 product = y[idx] * x[xstart] + carry;
8960   //    z[kdx] = (jlong)product;
8961   //    carry  = (jlong)(product >>> 64);
8962   //  }
8963   //  z[xstart] = carry;
8964   //
8965 
8966   Label L_first_loop, L_first_loop_exit;
8967   Label L_one_x, L_one_y, L_multiply;
8968 
8969   decrementl(xstart);
8970   jcc(Assembler::negative, L_one_x);
8971 
8972   movq(x_xstart, Address(x, xstart, Address::times_4,  0));
8973   rorq(x_xstart, 32); // convert big-endian to little-endian
8974 
8975   bind(L_first_loop);
8976   decrementl(idx);
8977   jcc(Assembler::negative, L_first_loop_exit);
8978   decrementl(idx);
8979   jcc(Assembler::negative, L_one_y);
8980   movq(y_idx, Address(y, idx, Address::times_4,  0));
8981   rorq(y_idx, 32); // convert big-endian to little-endian
8982   bind(L_multiply);
8983   movq(product, x_xstart);
8984   mulq(y_idx); // product(rax) * y_idx -> rdx:rax
8985   addq(product, carry);
8986   adcq(rdx, 0);
8987   subl(kdx, 2);
8988   movl(Address(z, kdx, Address::times_4,  4), product);
8989   shrq(product, 32);
8990   movl(Address(z, kdx, Address::times_4,  0), product);
8991   movq(carry, rdx);
8992   jmp(L_first_loop);
8993 
8994   bind(L_one_y);
8995   movl(y_idx, Address(y,  0));
8996   jmp(L_multiply);
8997 
8998   bind(L_one_x);
8999   movl(x_xstart, Address(x,  0));
9000   jmp(L_first_loop);
9001 
9002   bind(L_first_loop_exit);
9003 }
9004 
9005 /**
9006  * Multiply 64 bit by 64 bit and add 128 bit.
9007  */
9008 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
9009                                             Register yz_idx, Register idx,
9010                                             Register carry, Register product, int offset) {
9011   //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
9012   //     z[kdx] = (jlong)product;
9013 
9014   movq(yz_idx, Address(y, idx, Address::times_4,  offset));
9015   rorq(yz_idx, 32); // convert big-endian to little-endian
9016   movq(product, x_xstart);
9017   mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
9018   movq(yz_idx, Address(z, idx, Address::times_4,  offset));
9019   rorq(yz_idx, 32); // convert big-endian to little-endian
9020 
9021   add2_with_carry(rdx, product, carry, yz_idx);
9022 
9023   movl(Address(z, idx, Address::times_4,  offset+4), product);
9024   shrq(product, 32);
9025   movl(Address(z, idx, Address::times_4,  offset), product);
9026 
9027 }
9028 
9029 /**
9030  * Multiply 128 bit by 128 bit. Unrolled inner loop.
9031  */
9032 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
9033                                              Register yz_idx, Register idx, Register jdx,
9034                                              Register carry, Register product,
9035                                              Register carry2) {
9036   //   jlong carry, x[], y[], z[];
9037   //   int kdx = ystart+1;
9038   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
9039   //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
9040   //     z[kdx+idx+1] = (jlong)product;
9041   //     jlong carry2  = (jlong)(product >>> 64);
9042   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
9043   //     z[kdx+idx] = (jlong)product;
9044   //     carry  = (jlong)(product >>> 64);
9045   //   }
9046   //   idx += 2;
9047   //   if (idx > 0) {
9048   //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
9049   //     z[kdx+idx] = (jlong)product;
9050   //     carry  = (jlong)(product >>> 64);
9051   //   }
9052   //
9053 
9054   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
9055 
9056   movl(jdx, idx);
9057   andl(jdx, 0xFFFFFFFC);
9058   shrl(jdx, 2);
9059 
9060   bind(L_third_loop);
9061   subl(jdx, 1);
9062   jcc(Assembler::negative, L_third_loop_exit);
9063   subl(idx, 4);
9064 
9065   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
9066   movq(carry2, rdx);
9067 
9068   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
9069   movq(carry, rdx);
9070   jmp(L_third_loop);
9071 
9072   bind (L_third_loop_exit);
9073 
9074   andl (idx, 0x3);
9075   jcc(Assembler::zero, L_post_third_loop_done);
9076 
9077   Label L_check_1;
9078   subl(idx, 2);
9079   jcc(Assembler::negative, L_check_1);
9080 
9081   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
9082   movq(carry, rdx);
9083 
9084   bind (L_check_1);
9085   addl (idx, 0x2);
9086   andl (idx, 0x1);
9087   subl(idx, 1);
9088   jcc(Assembler::negative, L_post_third_loop_done);
9089 
9090   movl(yz_idx, Address(y, idx, Address::times_4,  0));
9091   movq(product, x_xstart);
9092   mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
9093   movl(yz_idx, Address(z, idx, Address::times_4,  0));
9094 
9095   add2_with_carry(rdx, product, yz_idx, carry);
9096 
9097   movl(Address(z, idx, Address::times_4,  0), product);
9098   shrq(product, 32);
9099 
9100   shlq(rdx, 32);
9101   orq(product, rdx);
9102   movq(carry, product);
9103 
9104   bind(L_post_third_loop_done);
9105 }
9106 
9107 /**
9108  * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
9109  *
9110  */
9111 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
9112                                                   Register carry, Register carry2,
9113                                                   Register idx, Register jdx,
9114                                                   Register yz_idx1, Register yz_idx2,
9115                                                   Register tmp, Register tmp3, Register tmp4) {
9116   assert(UseBMI2Instructions, "should be used only when BMI2 is available");
9117 
9118   //   jlong carry, x[], y[], z[];
9119   //   int kdx = ystart+1;
9120   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
9121   //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
9122   //     jlong carry2  = (jlong)(tmp3 >>> 64);
9123   //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
9124   //     carry  = (jlong)(tmp4 >>> 64);
9125   //     z[kdx+idx+1] = (jlong)tmp3;
9126   //     z[kdx+idx] = (jlong)tmp4;
9127   //   }
9128   //   idx += 2;
9129   //   if (idx > 0) {
9130   //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
9131   //     z[kdx+idx] = (jlong)yz_idx1;
9132   //     carry  = (jlong)(yz_idx1 >>> 64);
9133   //   }
9134   //
9135 
9136   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
9137 
9138   movl(jdx, idx);
9139   andl(jdx, 0xFFFFFFFC);
9140   shrl(jdx, 2);
9141 
9142   bind(L_third_loop);
9143   subl(jdx, 1);
9144   jcc(Assembler::negative, L_third_loop_exit);
9145   subl(idx, 4);
9146 
9147   movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
9148   rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
9149   movq(yz_idx2, Address(y, idx, Address::times_4,  0));
9150   rorxq(yz_idx2, yz_idx2, 32);
9151 
9152   mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
9153   mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
9154 
9155   movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
9156   rorxq(yz_idx1, yz_idx1, 32);
9157   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
9158   rorxq(yz_idx2, yz_idx2, 32);
9159 
9160   if (VM_Version::supports_adx()) {
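    // ADX lets two carry chains run interleaved: adcxq reads and writes only
    // CF while adoxq reads and writes only OF, so the incoming carry and the
    // z[] limbs can be accumulated without serializing on a single flag.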
9161     adcxq(tmp3, carry);
9162     adoxq(tmp3, yz_idx1);
9163 
9164     adcxq(tmp4, tmp);
9165     adoxq(tmp4, yz_idx2);
9166 
9167     movl(carry, 0); // does not affect flags
9168     adcxq(carry2, carry);
9169     adoxq(carry2, carry);
9170   } else {
9171     add2_with_carry(tmp4, tmp3, carry, yz_idx1);
9172     add2_with_carry(carry2, tmp4, tmp, yz_idx2);
9173   }
9174   movq(carry, carry2);
9175 
9176   movl(Address(z, idx, Address::times_4, 12), tmp3);
9177   shrq(tmp3, 32);
9178   movl(Address(z, idx, Address::times_4,  8), tmp3);
9179 
9180   movl(Address(z, idx, Address::times_4,  4), tmp4);
9181   shrq(tmp4, 32);
9182   movl(Address(z, idx, Address::times_4,  0), tmp4);
9183 
9184   jmp(L_third_loop);
9185 
9186   bind (L_third_loop_exit);
9187 
9188   andl (idx, 0x3);
9189   jcc(Assembler::zero, L_post_third_loop_done);
9190 
9191   Label L_check_1;
9192   subl(idx, 2);
9193   jcc(Assembler::negative, L_check_1);
9194 
9195   movq(yz_idx1, Address(y, idx, Address::times_4,  0));
9196   rorxq(yz_idx1, yz_idx1, 32);
9197   mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
9198   movq(yz_idx2, Address(z, idx, Address::times_4,  0));
9199   rorxq(yz_idx2, yz_idx2, 32);
9200 
9201   add2_with_carry(tmp4, tmp3, carry, yz_idx2);
9202 
9203   movl(Address(z, idx, Address::times_4,  4), tmp3);
9204   shrq(tmp3, 32);
9205   movl(Address(z, idx, Address::times_4,  0), tmp3);
9206   movq(carry, tmp4);
9207 
9208   bind (L_check_1);
9209   addl (idx, 0x2);
9210   andl (idx, 0x1);
9211   subl(idx, 1);
9212   jcc(Assembler::negative, L_post_third_loop_done);
9213   movl(tmp4, Address(y, idx, Address::times_4,  0));
9214   mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
9215   movl(tmp4, Address(z, idx, Address::times_4,  0));
9216 
9217   add2_with_carry(carry2, tmp3, tmp4, carry);
9218 
9219   movl(Address(z, idx, Address::times_4,  0), tmp3);
9220   shrq(tmp3, 32);
9221 
9222   shlq(carry2, 32);
9223   orq(tmp3, carry2);
9224   movq(carry, tmp3);
9225 
9226   bind(L_post_third_loop_done);
9227 }
9228 
9229 /**
9230  * Code for BigInteger::multiplyToLen() intrinsic.
9231  *
9232  * rdi: x
9233  * rax: xlen
9234  * rsi: y
9235  * rcx: ylen
9236  * r8:  z
9237  * r11: zlen
9238  * r12: tmp1
9239  * r13: tmp2
9240  * r14: tmp3
9241  * r15: tmp4
9242  * rbx: tmp5
9243  *
9244  */
9245 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
9246                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
9247   ShortBranchVerifier sbv(this);
9248   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
9249 
9250   push(tmp1);
9251   push(tmp2);
9252   push(tmp3);
9253   push(tmp4);
9254   push(tmp5);
9255 
9256   push(xlen);
9257   push(zlen);
9258 
9259   const Register idx = tmp1;
9260   const Register kdx = tmp2;
9261   const Register xstart = tmp3;
9262 
9263   const Register y_idx = tmp4;
9264   const Register carry = tmp5;
9265   const Register product  = xlen;
9266   const Register x_xstart = zlen;  // reuse register
9267 
9268   // First Loop.
9269   //
9270   //  final static long LONG_MASK = 0xffffffffL;
9271   //  int xstart = xlen - 1;
9272   //  int ystart = ylen - 1;
9273   //  long carry = 0;
9274   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
9275   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
9276   //    z[kdx] = (int)product;
9277   //    carry = product >>> 32;
9278   //  }
9279   //  z[xstart] = (int)carry;
9280   //
9281 
9282   movl(idx, ylen);      // idx = ylen;
9283   movl(kdx, zlen);      // kdx = xlen+ylen;
9284   xorq(carry, carry);   // carry = 0;
9285 
9286   Label L_done;
9287 
9288   movl(xstart, xlen);
9289   decrementl(xstart);
9290   jcc(Assembler::negative, L_done);
9291 
9292   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
9293 
9294   Label L_second_loop;
9295   testl(kdx, kdx);
9296   jcc(Assembler::zero, L_second_loop);
9297 
9298   Label L_carry;
9299   subl(kdx, 1);
9300   jcc(Assembler::zero, L_carry);
9301 
9302   movl(Address(z, kdx, Address::times_4,  0), carry);
9303   shrq(carry, 32);
9304   subl(kdx, 1);
9305 
9306   bind(L_carry);
9307   movl(Address(z, kdx, Address::times_4,  0), carry);
9308 
9309   // Second and third (nested) loops.
9310   //
9311   // for (int i = xstart-1; i >= 0; i--) { // Second loop
9312   //   carry = 0;
9313   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
9314   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
9315   //                    (z[k] & LONG_MASK) + carry;
9316   //     z[k] = (int)product;
9317   //     carry = product >>> 32;
9318   //   }
9319   //   z[i] = (int)carry;
9320   // }
9321   //
9322   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
9323 
9324   const Register jdx = tmp1;
9325 
9326   bind(L_second_loop);
9327   xorl(carry, carry);    // carry = 0;
9328   movl(jdx, ylen);       // j = ystart+1
9329 
9330   subl(xstart, 1);       // i = xstart-1;
9331   jcc(Assembler::negative, L_done);
9332 
9333   push (z);
9334 
9335   Label L_last_x;
9336   lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
9337   subl(xstart, 1);       // i = xstart-1;
9338   jcc(Assembler::negative, L_last_x);
9339 
9340   if (UseBMI2Instructions) {
9341     movq(rdx,  Address(x, xstart, Address::times_4,  0));
9342     rorxq(rdx, rdx, 32); // convert big-endian to little-endian
9343   } else {
9344     movq(x_xstart, Address(x, xstart, Address::times_4,  0));
9345     rorq(x_xstart, 32);  // convert big-endian to little-endian
9346   }
9347 
9348   Label L_third_loop_prologue;
9349   bind(L_third_loop_prologue);
9350 
9351   push (x);
9352   push (xstart);
9353   push (ylen);
9354 
9355 
9356   if (UseBMI2Instructions) {
9357     multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
9358   } else { // !UseBMI2Instructions
9359     multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
9360   }
9361 
9362   pop(ylen);
9363   pop(xlen);
9364   pop(x);
9365   pop(z);
9366 
9367   movl(tmp3, xlen);
9368   addl(tmp3, 1);
9369   movl(Address(z, tmp3, Address::times_4,  0), carry);
9370   subl(tmp3, 1);
9371   jccb(Assembler::negative, L_done);
9372 
9373   shrq(carry, 32);
9374   movl(Address(z, tmp3, Address::times_4,  0), carry);
9375   jmp(L_second_loop);
9376 
9377   // Next infrequent code is moved outside loops.
9378   bind(L_last_x);
9379   if (UseBMI2Instructions) {
9380     movl(rdx, Address(x,  0));
9381   } else {
9382     movl(x_xstart, Address(x,  0));
9383   }
9384   jmp(L_third_loop_prologue);
9385 
9386   bind(L_done);
9387 
9388   pop(zlen);
9389   pop(xlen);
9390 
9391   pop(tmp5);
9392   pop(tmp4);
9393   pop(tmp3);
9394   pop(tmp2);
9395   pop(tmp1);
9396 }
9397 
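// Compare 'length' elements of obja[] and objb[]; the element size is
// (1 << log2_array_indxscale) bytes, with the scale passed in rcx. Returns
// in 'result' the index of the first mismatching element, or -1 when the
// arrays are equal over the whole range. The walk is done in bytes, and the
// final shrq(result) by the scale converts the byte offset back into an
// element index.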
9398 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
9399   Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
9400   assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
9401   Label VECTOR32_LOOP, VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
9402   Label VECTOR16_TAIL, VECTOR8_TAIL, VECTOR4_TAIL;
9403   Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
9404   Label SAME_TILL_END, DONE;
9405   Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;
9406 
9407   // scale is in rcx on both Win64 and Unix
9408   ShortBranchVerifier sbv(this);
9409 
9410   shlq(length);
9411   xorq(result, result);
9412 
9413   cmpq(length, 8);
9414   jcc(Assembler::equal, VECTOR8_LOOP);
9415   jcc(Assembler::less, VECTOR4_TAIL);
9416 
9417   if (UseAVX >= 2) {
9418 
9419     cmpq(length, 16);
9420     jcc(Assembler::equal, VECTOR16_LOOP);
9421     jcc(Assembler::less, VECTOR8_LOOP);
9422 
9423     cmpq(length, 32);
9424     jccb(Assembler::less, VECTOR16_TAIL);
9425 
9426     subq(length, 32);
9427     bind(VECTOR32_LOOP);
9428     vmovdqu(rymm0, Address(obja, result));
9429     vmovdqu(rymm1, Address(objb, result));
9430     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
9431     vptest(rymm2, rymm2);
9432     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
9433     addq(result, 32);
9434     subq(length, 32);
9435     jccb(Assembler::greaterEqual, VECTOR32_LOOP);
9436     addq(length, 32);
9437     jcc(Assembler::equal, SAME_TILL_END);
9438     // fall through if fewer than 32 bytes are left; close the branch here
9439 
9440     bind(VECTOR16_TAIL);
9441     cmpq(length, 16);
9442     jccb(Assembler::less, VECTOR8_TAIL);
9443     bind(VECTOR16_LOOP);
9444     movdqu(rymm0, Address(obja, result));
9445     movdqu(rymm1, Address(objb, result));
9446     vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
9447     ptest(rymm2, rymm2);
9448     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
9449     addq(result, 16);
9450     subq(length, 16);
9451     jcc(Assembler::equal, SAME_TILL_END);
9452     // fall through if fewer than 16 bytes are left
9453   } else {//regular intrinsics
9454 
9455     cmpq(length, 16);
9456     jccb(Assembler::less, VECTOR8_TAIL);
9457 
9458     subq(length, 16);
9459     bind(VECTOR16_LOOP);
9460     movdqu(rymm0, Address(obja, result));
9461     movdqu(rymm1, Address(objb, result));
9462     pxor(rymm0, rymm1);
9463     ptest(rymm0, rymm0);
9464     jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found
9465     addq(result, 16);
9466     subq(length, 16);
9467     jccb(Assembler::greaterEqual, VECTOR16_LOOP);
9468     addq(length, 16);
9469     jcc(Assembler::equal, SAME_TILL_END);
9470     // fall through if fewer than 16 bytes are left
9471   }
9472 
9473   bind(VECTOR8_TAIL);
9474   cmpq(length, 8);
9475   jccb(Assembler::less, VECTOR4_TAIL);
9476   bind(VECTOR8_LOOP);
9477   movq(tmp1, Address(obja, result));
9478   movq(tmp2, Address(objb, result));
9479   xorq(tmp1, tmp2);
9480   testq(tmp1, tmp1);
9481   jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found
9482   addq(result, 8);
9483   subq(length, 8);
9484   jcc(Assembler::equal, SAME_TILL_END);
9485   // fall through if fewer than 8 bytes are left
9486 
9487   bind(VECTOR4_TAIL);
9488   cmpq(length, 4);
9489   jccb(Assembler::less, BYTES_TAIL);
9490   bind(VECTOR4_LOOP);
9491   movl(tmp1, Address(obja, result));
9492   xorl(tmp1, Address(objb, result));
9493   testl(tmp1, tmp1);
9494   jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found
9495   addq(result, 4);
9496   subq(length, 4);
9497   jcc(Assembler::equal, SAME_TILL_END);
9498   // fall through if fewer than 4 bytes are left
9499 
9500   bind(BYTES_TAIL);
9501   bind(BYTES_LOOP);
9502   load_unsigned_byte(tmp1, Address(obja, result));
9503   load_unsigned_byte(tmp2, Address(objb, result));
9504   xorl(tmp1, tmp2);
9505   testl(tmp1, tmp1);
9506   jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
9507   decq(length);
9508   jccb(Assembler::zero, SAME_TILL_END);
9509   incq(result);
9510   load_unsigned_byte(tmp1, Address(obja, result));
9511   load_unsigned_byte(tmp2, Address(objb, result));
9512   xorl(tmp1, tmp2);
9513   testl(tmp1, tmp1);
9514   jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
9515   decq(length);
9516   jccb(Assembler::zero, SAME_TILL_END);
9517   incq(result);
9518   load_unsigned_byte(tmp1, Address(obja, result));
9519   load_unsigned_byte(tmp2, Address(objb, result));
9520   xorl(tmp1, tmp2);
9521   testl(tmp1, tmp1);
9522   jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
9523   jmpb(SAME_TILL_END);
9524 
9525   if (UseAVX >= 2) {
9526     bind(VECTOR32_NOT_EQUAL);
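    // Locate the first differing byte: vpcmpeqb leaves 0xFF in the equal
    // byte lanes, xoring with an all-ones register inverts that into a
    // mismatch mask, and vpmovmskb/bsfq then give the offset of the lowest
    // mismatching byte within the 32-byte chunk.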
9527     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
9528     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
9529     vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
9530     vpmovmskb(tmp1, rymm0);
9531     bsfq(tmp1, tmp1);
9532     addq(result, tmp1);
9533     shrq(result);
9534     jmpb(DONE);
9535   }
9536 
9537   bind(VECTOR16_NOT_EQUAL);
9538   if (UseAVX >= 2) {
9539     vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
9540     vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
9541     pxor(rymm0, rymm2);
9542   } else {
9543     pcmpeqb(rymm2, rymm2);
9544     pxor(rymm0, rymm1);
9545     pcmpeqb(rymm0, rymm1);
9546     pxor(rymm0, rymm2);
9547   }
9548   pmovmskb(tmp1, rymm0);
9549   bsfq(tmp1, tmp1);
9550   addq(result, tmp1);
9551   shrq(result);
9552   jmpb(DONE);
9553 
9554   bind(VECTOR8_NOT_EQUAL);
9555   bind(VECTOR4_NOT_EQUAL);
9556   bsfq(tmp1, tmp1);
9557   shrq(tmp1, 3);
9558   addq(result, tmp1);
9559   bind(BYTES_NOT_EQUAL);
9560   shrq(result);
9561   jmpb(DONE);
9562 
9563   bind(SAME_TILL_END);
9564   mov64(result, -1);
9565 
9566   bind(DONE);
9567 }
9568 
9569 
9570 //Helper functions for square_to_len()
9571 
9572 /**
9573  * Store the squares of x[], right shifted one bit (divided by 2) into z[]
9574  * Preserves x and z and modifies rest of the registers.
9575  */
9576 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
9577   // Perform square and right shift by 1
9578   // Handle odd xlen case first, then for even xlen do the following
9579   // jlong carry = 0;
9580   // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
9581   //     huge_128 product = x[j:j+1] * x[j:j+1];
9582   //     z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
9583   //     z[i+2:i+3] = (jlong)(product >>> 1);
9584   //     carry = (jlong)product;
9585   // }
9586 
9587   xorq(tmp5, tmp5);     // carry
9588   xorq(rdxReg, rdxReg);
9589   xorl(tmp1, tmp1);     // index for x
9590   xorl(tmp4, tmp4);     // index for z
9591 
9592   Label L_first_loop, L_first_loop_exit;
9593 
9594   testl(xlen, 1);
9595   jccb(Assembler::zero, L_first_loop); // jump if xlen is even
9596 
9597   // Square and right shift by 1 the odd element using 32 bit multiply
9598   movl(raxReg, Address(x, tmp1, Address::times_4, 0));
9599   imulq(raxReg, raxReg);
9600   shrq(raxReg, 1);
9601   adcq(tmp5, 0);
9602   movq(Address(z, tmp4, Address::times_4, 0), raxReg);
9603   incrementl(tmp1);
9604   addl(tmp4, 2);
9605 
9606   // Square and  right shift by 1 the rest using 64 bit multiply
9607   bind(L_first_loop);
9608   cmpptr(tmp1, xlen);
9609   jccb(Assembler::equal, L_first_loop_exit);
9610 
9611   // Square
9612   movq(raxReg, Address(x, tmp1, Address::times_4,  0));
9613   rorq(raxReg, 32);    // convert big-endian to little-endian
9614   mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax
9615 
9616   // Right shift by 1 and save carry
9617   shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
9618   rcrq(rdxReg, 1);
9619   rcrq(raxReg, 1);
9620   adcq(tmp5, 0);
9621 
9622   // Store result in z
9623   movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
9624   movq(Address(z, tmp4, Address::times_4, 8), raxReg);
9625 
9626   // Update indices for x and z
9627   addl(tmp1, 2);
9628   addl(tmp4, 4);
9629   jmp(L_first_loop);
9630 
9631   bind(L_first_loop_exit);
9632 }
9633 
9634 
9635 /**
9636  * Perform the following multiply add operation using BMI2 instructions
9637  * carry:sum = sum + op1*op2 + carry
9638  * op2 should be in rdx
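 * (mulxq takes its implicit multiplicand from rdx, hence this requirement)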
9639  * op2 is preserved, all other registers are modified
9640  */
9641 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
9642   assert(op2 == rdx, "op2 must be rdx for mulxq");
9643   mulxq(tmp2, op1, op1);  //  op1 * op2 -> tmp2:op1
9644   addq(sum, carry);
9645   adcq(tmp2, 0);
9646   addq(sum, op1);
9647   adcq(tmp2, 0);
9648   movq(carry, tmp2);
9649 }
9650 
9651 /**
9652  * Perform the following multiply add operation:
9653  * carry:sum = sum + op1*op2 + carry
9654  * Preserves op1, op2 and modifies rest of registers
9655  */
9656 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
9657   // rdx:rax = op1 * op2
9658   movq(raxReg, op2);
9659   mulq(op1);
9660 
9661   //  rdx:rax = sum + carry + rdx:rax
9662   addq(sum, carry);
9663   adcq(rdxReg, 0);
9664   addq(sum, raxReg);
9665   adcq(rdxReg, 0);
9666 
9667   // carry:sum = rdx:sum
9668   movq(carry, rdxReg);
9669 }
9670 
9671 /**
9672  * Add 64 bit long carry into z[] with carry propagation.
9673  * Preserves z and carry register values and modifies rest of registers.
9674  *
9675  */
9676 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
9677   Label L_fourth_loop, L_fourth_loop_exit;
9678 
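  // Add 'carry' into the least significant jlong of z[] (the last two ints),
  // then keep propagating a 1 toward the more significant limbs, one jlong
  // at a time, for as long as the previous addq set the carry flag.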
9679   movl(tmp1, 1);
9680   subl(zlen, 2);
9681   addq(Address(z, zlen, Address::times_4, 0), carry);
9682 
9683   bind(L_fourth_loop);
9684   jccb(Assembler::carryClear, L_fourth_loop_exit);
9685   subl(zlen, 2);
9686   jccb(Assembler::negative, L_fourth_loop_exit);
9687   addq(Address(z, zlen, Address::times_4, 0), tmp1);
9688   jmp(L_fourth_loop);
9689   bind(L_fourth_loop_exit);
9690 }
9691 
9692 /**
9693  * Shift z[] left by 1 bit.
9694  * Preserves x, len, z and zlen registers and modifies rest of the registers.
9695  *
9696  */
9697 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
9698 
9699   Label L_fifth_loop, L_fifth_loop_exit;
9700 
9701   // Fifth loop
9702   // Perform primitiveLeftShift(z, zlen, 1)
9703 
9704   const Register prev_carry = tmp1;
9705   const Register new_carry = tmp4;
9706   const Register value = tmp2;
9707   const Register zidx = tmp3;
9708 
9709   // int zidx, carry;
9710   // long value;
9711   // carry = 0;
9712   // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
9713   //    (carry:value)  = (z[i] << 1) | carry ;
9714   //    z[i] = value;
9715   // }
9716 
9717   movl(zidx, zlen);
9718   xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
9719 
9720   bind(L_fifth_loop);
9721   decl(zidx);  // Use decl to preserve carry flag
9722   decl(zidx);
9723   jccb(Assembler::negative, L_fifth_loop_exit);
9724 
9725   if (UseBMI2Instructions) {
9726      movq(value, Address(z, zidx, Address::times_4, 0));
9727      rclq(value, 1);
9728      rorxq(value, value, 32);
9729      movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
9730   }
9731   else {
9732     // clear new_carry
9733     xorl(new_carry, new_carry);
9734 
9735     // Shift z[i] by 1, or in previous carry and save new carry
9736     movq(value, Address(z, zidx, Address::times_4, 0));
9737     shlq(value, 1);
9738     adcl(new_carry, 0);
9739 
9740     orq(value, prev_carry);
9741     rorq(value, 0x20);
9742     movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
9743 
9744     // Set previous carry = new carry
9745     movl(prev_carry, new_carry);
9746   }
9747   jmp(L_fifth_loop);
9748 
9749   bind(L_fifth_loop_exit);
9750 }
9751 
9752 
9753 /**
9754  * Code for BigInteger::squareToLen() intrinsic
9755  *
9756  * rdi: x
9757  * rsi: len
9758  * r8:  z
9759  * rcx: zlen
9760  * r12: tmp1
9761  * r13: tmp2
9762  * r14: tmp3
9763  * r15: tmp4
9764  * rbx: tmp5
9765  *
9766  */
9767 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
9768 
9769   Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
9770   push(tmp1);
9771   push(tmp2);
9772   push(tmp3);
9773   push(tmp4);
9774   push(tmp5);
9775 
9776   // First loop
9777   // Store the squares, right shifted one bit (i.e., divided by 2).
9778   square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
9779 
9780   // Add in off-diagonal sums.
9781   //
9782   // Second, third (nested) and fourth loops.
9783   // zlen +=2;
9784   // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
9785   //    carry = 0;
9786   //    long op2 = x[xidx:xidx+1];
9787   //    for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
9788   //       k -= 2;
9789   //       long op1 = x[j:j+1];
9790   //       long sum = z[k:k+1];
9791   //       carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
9792   //       z[k:k+1] = sum;
9793   //    }
9794   //    add_one_64(z, k, carry, tmp_regs);
9795   // }
9796 
9797   const Register carry = tmp5;
9798   const Register sum = tmp3;
9799   const Register op1 = tmp4;
9800   Register op2 = tmp2;
9801 
9802   push(zlen);
9803   push(len);
9804   addl(zlen,2);
9805   bind(L_second_loop);
9806   xorq(carry, carry);
9807   subl(zlen, 4);
9808   subl(len, 2);
9809   push(zlen);
9810   push(len);
9811   cmpl(len, 0);
9812   jccb(Assembler::lessEqual, L_second_loop_exit);
9813 
9814   // Multiply an array by one 64 bit long.
9815   if (UseBMI2Instructions) {
9816     op2 = rdxReg;
9817     movq(op2, Address(x, len, Address::times_4,  0));
9818     rorxq(op2, op2, 32);
9819   }
9820   else {
9821     movq(op2, Address(x, len, Address::times_4,  0));
9822     rorq(op2, 32);
9823   }
9824 
9825   bind(L_third_loop);
9826   decrementl(len);
9827   jccb(Assembler::negative, L_third_loop_exit);
9828   decrementl(len);
9829   jccb(Assembler::negative, L_last_x);
9830 
9831   movq(op1, Address(x, len, Address::times_4,  0));
9832   rorq(op1, 32);
9833 
9834   bind(L_multiply);
9835   subl(zlen, 2);
9836   movq(sum, Address(z, zlen, Address::times_4,  0));
9837 
9838   // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
9839   if (UseBMI2Instructions) {
9840     multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
9841   }
9842   else {
9843     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
9844   }
9845 
9846   movq(Address(z, zlen, Address::times_4, 0), sum);
9847 
9848   jmp(L_third_loop);
9849   bind(L_third_loop_exit);
9850 
9851   // Fourth loop
9852   // Add 64 bit long carry into z with carry propagation.
9853   // Uses offsetted zlen.
9854   add_one_64(z, zlen, carry, tmp1);
9855 
9856   pop(len);
9857   pop(zlen);
9858   jmp(L_second_loop);
9859 
9860   // Next infrequent code is moved outside loops.
9861   bind(L_last_x);
9862   movl(op1, Address(x, 0));
9863   jmp(L_multiply);
9864 
9865   bind(L_second_loop_exit);
9866   pop(len);
9867   pop(zlen);
9868   pop(len);
9869   pop(zlen);
9870 
9871   // Fifth loop
9872   // Shift z left 1 bit.
9873   lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
9874 
9875   // z[zlen-1] |= x[len-1] & 1;
9876   movl(tmp3, Address(x, len, Address::times_4, -4));
9877   andl(tmp3, 1);
9878   orl(Address(z, zlen, Address::times_4,  -4), tmp3);
9879 
9880   pop(tmp5);
9881   pop(tmp4);
9882   pop(tmp3);
9883   pop(tmp2);
9884   pop(tmp1);
9885 }
9886 
9887 /**
9888  * Helper function for mul_add()
9889  * Multiply the in[] by int k and add to out[] starting at offset offs using
9890  * 128 bit by 32 bit multiply and return the carry in tmp5.
9891  * Only quad int aligned length of in[] is operated on in this function.
9892  * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
9893  * This function preserves out, in and k registers.
9894  * len and offset point to the appropriate index in "in" & "out" correspondingly
9895  * tmp5 has the carry.
9896  * other registers are temporary and are modified.
9897  *
9898  */
9899 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
9900   Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
9901   Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
9902 
9903   Label L_first_loop, L_first_loop_exit;
9904 
9905   movl(tmp1, len);
9906   shrl(tmp1, 2);
9907 
9908   bind(L_first_loop);
9909   subl(tmp1, 1);
9910   jccb(Assembler::negative, L_first_loop_exit);
9911 
9912   subl(len, 4);
9913   subl(offset, 4);
9914 
9915   Register op2 = tmp2;
9916   const Register sum = tmp3;
9917   const Register op1 = tmp4;
9918   const Register carry = tmp5;
9919 
9920   if (UseBMI2Instructions) {
9921     op2 = rdxReg;
9922   }
9923 
9924   movq(op1, Address(in, len, Address::times_4,  8));
9925   rorq(op1, 32);
9926   movq(sum, Address(out, offset, Address::times_4,  8));
9927   rorq(sum, 32);
9928   if (UseBMI2Instructions) {
9929     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
9930   }
9931   else {
9932     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
9933   }
9934   // Store back in big endian from little endian
9935   rorq(sum, 0x20);
9936   movq(Address(out, offset, Address::times_4,  8), sum);
9937 
9938   movq(op1, Address(in, len, Address::times_4,  0));
9939   rorq(op1, 32);
9940   movq(sum, Address(out, offset, Address::times_4,  0));
9941   rorq(sum, 32);
9942   if (UseBMI2Instructions) {
9943     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
9944   }
9945   else {
9946     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
9947   }
9948   // Store back in big endian from little endian
9949   rorq(sum, 0x20);
9950   movq(Address(out, offset, Address::times_4,  0), sum);
9951 
9952   jmp(L_first_loop);
9953   bind(L_first_loop_exit);
9954 }
9955 
9956 /**
9957  * Code for BigInteger::mulAdd() intrinsic
9958  *
9959  * rdi: out
9960  * rsi: in
9961  * r11: offs (out.length - offset)
9962  * rcx: len
9963  * r8:  k
9964  * r12: tmp1
9965  * r13: tmp2
9966  * r14: tmp3
9967  * r15: tmp4
9968  * rbx: tmp5
9969  * Multiply the in[] by word k and add to out[], return the carry in rax
9970  */
9971 void MacroAssembler::mul_add(Register out, Register in, Register offs,
9972    Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
9973    Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
9974 
9975   Label L_carry, L_last_in, L_done;
9976 
9977 // carry = 0;
9978 // for (int j=len-1; j >= 0; j--) {
9979 //    long product = (in[j] & LONG_MASK) * kLong +
9980 //                   (out[offs] & LONG_MASK) + carry;
9981 //    out[offs--] = (int)product;
9982 //    carry = product >>> 32;
9983 // }
9984 //
9985   push(tmp1);
9986   push(tmp2);
9987   push(tmp3);
9988   push(tmp4);
9989   push(tmp5);
9990 
9991   Register op2 = tmp2;
9992   const Register sum = tmp3;
9993   const Register op1 = tmp4;
9994   const Register carry =  tmp5;
9995 
9996   if (UseBMI2Instructions) {
9997     op2 = rdxReg;
9998   }
9999   movl(op2, k);
10003 
10004   xorq(carry, carry);
10005 
10006   // First loop
10007 
10008   // Multiply in[] by k in a 4-way unrolled loop using 128-bit by 32-bit multiply.
10009   // The carry is left in tmp5.
10010   mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
10011 
10012   // Multiply the trailing in[] entry using 64-bit by 32-bit multiply, if any
10013   decrementl(len);
10014   jccb(Assembler::negative, L_carry);
10015   decrementl(len);
10016   jccb(Assembler::negative, L_last_in);
10017 
10018   movq(op1, Address(in, len, Address::times_4,  0));
10019   rorq(op1, 32);
10020 
10021   subl(offs, 2);
10022   movq(sum, Address(out, offs, Address::times_4,  0));
10023   rorq(sum, 32);
10024 
10025   if (UseBMI2Instructions) {
10026     multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
10027   }
10028   else {
10029     multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
10030   }
10031 
10032   // Store back in big endian from little endian
10033   rorq(sum, 0x20);
10034   movq(Address(out, offs, Address::times_4,  0), sum);
10035 
10036   testl(len, len);
10037   jccb(Assembler::zero, L_carry);
10038 
10039   // Multiply the last in[] entry, if any
10040   bind(L_last_in);
10041   movl(op1, Address(in, 0));
10042   movl(sum, Address(out, offs, Address::times_4,  -4));
10043 
10044   movl(raxReg, k);
10045   mull(op1); //tmp4 * eax -> edx:eax
10046   addl(sum, carry);
10047   adcl(rdxReg, 0);
10048   addl(sum, raxReg);
10049   adcl(rdxReg, 0);
10050   movl(carry, rdxReg);
10051 
10052   movl(Address(out, offs, Address::times_4,  -4), sum);
10053 
10054   bind(L_carry);
10055   // return tmp5/carry as the carry in rax
10056   movl(rax, carry);
10057 
10058   bind(L_done);
10059   pop(tmp5);
10060   pop(tmp4);
10061   pop(tmp3);
10062   pop(tmp2);
10063   pop(tmp1);
10064 }
10065 #endif
10066 
10067 /**
10068  * Emits code to update CRC-32 with a byte value according to constants in table
10069  *
10070  * @param [in,out]crc   Register containing the crc.
10071  * @param [in]val       Register containing the byte to fold into the CRC.
10072  * @param [in]table     Register containing the table of crc constants.
10073  *
10074  * uint32_t crc;
10075  * val = crc_table[(val ^ crc) & 0xFF];
10076  * crc = val ^ (crc >> 8);
10077  *
10078  */
10079 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
10080   xorl(val, crc);
10081   andl(val, 0xFF);
10082   shrl(crc, 8); // unsigned shift
10083   xorl(crc, Address(table, val, Address::times_4, 0));
10084 }
10085 
10086 /**
10087  * Fold 128-bit data chunk
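 *
 * In effect this computes
 *   xcrc' = clmul(xcrc[127:64], K_hi) ^ clmul(xcrc[63:0], K_lo) ^ M
 * where K_hi:K_lo are the folding constants supplied in xK and M is the
 * 128-bit data chunk at buf + offset. This is the standard CRC-by-128
 * folding identity; the concrete constants are the ones the caller loads
 * from StubRoutines::x86::crc_by128_masks_addr().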
10088  */
10089 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
10090   if (UseAVX > 0) {
10091     vpclmulhdq(xtmp, xK, xcrc); // [123:64]
10092     vpclmulldq(xcrc, xK, xcrc); // [63:0]
10093     vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
10094     pxor(xcrc, xtmp);
10095   } else {
10096     movdqa(xtmp, xcrc);
10097     pclmulhdq(xtmp, xK);   // [123:64]
10098     pclmulldq(xcrc, xK);   // [63:0]
10099     pxor(xcrc, xtmp);
10100     movdqu(xtmp, Address(buf, offset));
10101     pxor(xcrc, xtmp);
10102   }
10103 }
10104 
10105 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
10106   if (UseAVX > 0) {
10107     vpclmulhdq(xtmp, xK, xcrc);
10108     vpclmulldq(xcrc, xK, xcrc);
10109     pxor(xcrc, xbuf);
10110     pxor(xcrc, xtmp);
10111   } else {
10112     movdqa(xtmp, xcrc);
10113     pclmulhdq(xtmp, xK);
10114     pclmulldq(xcrc, xK);
10115     pxor(xcrc, xbuf);
10116     pxor(xcrc, xtmp);
10117   }
10118 }
10119 
10120 /**
10121  * 8-bit folds to compute 32-bit CRC
10122  *
10123  * uint64_t xcrc;
10124  * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
10125  */
10126 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
10127   movdl(tmp, xcrc);
10128   andl(tmp, 0xFF);
10129   movdl(xtmp, Address(table, tmp, Address::times_4, 0));
10130   psrldq(xcrc, 1); // unsigned shift one byte
10131   pxor(xcrc, xtmp);
10132 }
10133 
10134 /**
10135  * uint32_t crc;
10136  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
10137  */
10138 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
10139   movl(tmp, crc);
10140   andl(tmp, 0xFF);
10141   shrl(crc, 8);
10142   xorl(crc, Address(table, tmp, Address::times_4, 0));
10143 }
10144 
10145 /**
10146  * @param crc   register containing existing CRC (32-bit)
10147  * @param buf   register pointing to input byte buffer (byte*)
10148  * @param len   register containing number of bytes
10149  * @param table register that will contain address of CRC table
10150  * @param tmp   scratch register
10151  */
10152 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
10153   assert_different_registers(crc, buf, len, table, tmp, rax);
10154 
10155   Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
10156   Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
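  // Overall flow: byte-align 'buf' to 16 bytes, fold 512 bits per iteration
  // across four parallel 128-bit streams, reduce 512 bits to 128, reduce the
  // remaining 128 bits to 32 via carry-less multiplies plus eight 8-bit
  // table folds, then finish any tail bytes with the byte-at-a-time loop.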
10157 
10158   // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
10159   // context for the registers used, where all instructions below are using 128-bit mode
10160   // On EVEX without VL and BW, these instructions will all be AVX.
10161   if (VM_Version::supports_avx512vlbw()) {
10162     movl(tmp, 0xffff);
10163     kmovwl(k1, tmp);
10164   }
10165 
10166   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
10167   notl(crc); // ~crc
10168   cmpl(len, 16);
10169   jcc(Assembler::less, L_tail);
10170 
10171   // Align buffer to 16 bytes
10172   movl(tmp, buf);
10173   andl(tmp, 0xF);
10174   jccb(Assembler::zero, L_aligned);
10175   subl(tmp,  16);
10176   addl(len, tmp);
10177 
10178   align(4);
10179   BIND(L_align_loop);
10180   movsbl(rax, Address(buf, 0)); // load byte with sign extension
10181   update_byte_crc32(crc, rax, table);
10182   increment(buf);
10183   incrementl(tmp);
10184   jccb(Assembler::less, L_align_loop);
10185 
10186   BIND(L_aligned);
10187   movl(tmp, len); // save
10188   shrl(len, 4);
10189   jcc(Assembler::zero, L_tail_restore);
10190 
10191   // Fold crc into first bytes of vector
10192   movdqa(xmm1, Address(buf, 0));
10193   movdl(rax, xmm1);
10194   xorl(crc, rax);
10195   pinsrd(xmm1, crc, 0);
10196   addptr(buf, 16);
10197   subl(len, 4); // len > 0
10198   jcc(Assembler::less, L_fold_tail);
10199 
10200   movdqa(xmm2, Address(buf,  0));
10201   movdqa(xmm3, Address(buf, 16));
10202   movdqa(xmm4, Address(buf, 32));
10203   addptr(buf, 48);
10204   subl(len, 3);
10205   jcc(Assembler::lessEqual, L_fold_512b);
10206 
10207   // Fold total 512 bits of polynomial on each iteration,
10208   // 128 bits per each of 4 parallel streams.
10209   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
10210 
10211   align(32);
10212   BIND(L_fold_512b_loop);
10213   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
10214   fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
10215   fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
10216   fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
10217   addptr(buf, 64);
10218   subl(len, 4);
10219   jcc(Assembler::greater, L_fold_512b_loop);
10220 
10221   // Fold 512 bits to 128 bits.
10222   BIND(L_fold_512b);
10223   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
10224   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
10225   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
10226   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
10227 
10228   // Fold the rest of 128 bits data chunks
10229   BIND(L_fold_tail);
10230   addl(len, 3);
10231   jccb(Assembler::lessEqual, L_fold_128b);
10232   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
10233 
10234   BIND(L_fold_tail_loop);
10235   fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
10236   addptr(buf, 16);
10237   decrementl(len);
10238   jccb(Assembler::greater, L_fold_tail_loop);
10239 
10240   // Fold 128 bits in xmm1 down into 32 bits in crc register.
10241   BIND(L_fold_128b);
10242   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
10243   if (UseAVX > 0) {
10244     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
10245     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
10246     vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
10247   } else {
10248     movdqa(xmm2, xmm0);
10249     pclmulqdq(xmm2, xmm1, 0x1);
10250     movdqa(xmm3, xmm0);
10251     pand(xmm3, xmm2);
10252     pclmulqdq(xmm0, xmm3, 0x1);
10253   }
10254   psrldq(xmm1, 8);
10255   psrldq(xmm2, 4);
10256   pxor(xmm0, xmm1);
10257   pxor(xmm0, xmm2);
10258 
10259   // 8 8-bit folds to compute 32-bit CRC.
10260   for (int j = 0; j < 4; j++) {
10261     fold_8bit_crc32(xmm0, table, xmm1, rax);
10262   }
10263   movdl(crc, xmm0); // mov 32 bits to general register
10264   for (int j = 0; j < 4; j++) {
10265     fold_8bit_crc32(crc, table, rax);
10266   }
10267 
10268   BIND(L_tail_restore);
10269   movl(len, tmp); // restore
10270   BIND(L_tail);
10271   andl(len, 0xf);
10272   jccb(Assembler::zero, L_exit);
10273 
10274   // Fold the rest of bytes
10275   align(4);
10276   BIND(L_tail_loop);
10277   movsbl(rax, Address(buf, 0)); // load byte with sign extension
10278   update_byte_crc32(crc, rax, table);
10279   increment(buf);
10280   decrementl(len);
10281   jccb(Assembler::greater, L_tail_loop);
10282 
10283   BIND(L_exit);
10284   notl(crc); // ~crc
10285 }
10286 
10287 #ifdef _LP64
10288 // S. Gueron / Information Processing Letters 112 (2012) 184
10289 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
10290 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
10291 // Output: the 64-bit carry-less product of B * CONST
void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addq(tmp3, n * 256 * 8);
  }
  //    Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addq(tmp1, tmp3);
  movq(tmp1, Address(tmp1, 0));

  //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 8);
  xorq(tmp1, tmp2);

  //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 16);
  xorq(tmp1, tmp2);

  //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in, 24);
  andl(in, 0x000000FF);
  shll(in, 3);
  addq(in, tmp3);
  movq(in, Address(in, 0));

  shlq(in, 24);
  xorq(in, tmp1);
  //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out); // only the low 32 bits of in_out are used

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);

    movdq(in_out, w_xtmp1);
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
  }
}

// Recombination Alternative 2: No bit-reflections
// T1 = (CRC_A * U1) << 1
// T2 = (CRC_B * U2) << 1
// C1 = T1 >> 32
// C2 = T2 >> 32
// T1 = T1 & 0xFFFFFFFF
// T2 = T2 & 0xFFFFFFFF
// T1 = CRC32(0, T1)
// T2 = CRC32(0, T2)
// C1 = C1 ^ T1
// C2 = C2 ^ T2
// CRC = C1 ^ C2 ^ CRC_C
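// Illustrative C sketch of the recombination above (not generated code).
// clmul32() is a hypothetical 32x32->64 carry-less multiply and crc32_u32()
// stands in for the CRC32 instruction applied to a 32-bit operand:
//
//   uint32_t recombine(uint32_t crc_a, uint32_t crc_b, uint32_t crc_c,
//                      uint32_t u1, uint32_t u2) {
//     uint64_t t1 = clmul32(crc_a, u1) << 1;
//     uint64_t t2 = clmul32(crc_b, u2) << 1;
//     uint32_t c1 = (uint32_t)(t1 >> 32) ^ crc32_u32(0, (uint32_t)t1);
//     uint32_t c2 = (uint32_t)(t2 >> 32) ^ crc32_u32(0, (uint32_t)t2);
//     return c1 ^ c2 ^ crc_c;
//   }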
void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  shlq(in_out, 1);
  movl(tmp1, in_out);
  shrq(in_out, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2); // we don't care about the upper 32 bits here
  shlq(in1, 1);
  movl(tmp1, in1);
  shrq(in1, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

// Set N to a predefined value.
// Subtract it from the length of the buffer.
// Execute in a loop:
// CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
// for i = 1 to N do
//  CRC_A = CRC32(CRC_A, A[i])
//  CRC_B = CRC32(CRC_B, B[i])
//  CRC_C = CRC32(CRC_C, C[i])
// end for
// Recombine
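// Illustrative C sketch of one chunk pass (not generated code). crc32_u64()
// stands in for the 64-bit CRC32 instruction, 'size' is the per-stream
// partition length in bytes, and u1/u2 are the precomputed constants:
//
//   while (len >= 3 * size) {
//     uint32_t crc_b = 0, crc_c = 0;   // the running crc serves as CRC_A
//     for (const uint8_t* end = buf + size; buf < end; buf += 8) {
//       crc   = crc32_u64(crc,   *(const uint64_t*)(buf));
//       crc_b = crc32_u64(crc_b, *(const uint64_t*)(buf + size));
//       crc_c = crc32_u64(crc_c, *(const uint64_t*)(buf + 2 * size));
//     }
//     crc = recombine(crc, crc_b, crc_c, u1, u2);  // see the sketch above
//     buf += 2 * size;                 // streams B and C were consumed too
//     len -= 3 * size;
//   }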
void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
    xorl(tmp1, tmp1);
    xorl(tmp2, tmp2);
    movq(tmp3, in_out2);
    addq(tmp3, size);

    bind(L_processPartition);
      crc32(in_out3, Address(in_out2, 0), 8);
      crc32(tmp1, Address(in_out2, size), 8);
      crc32(tmp2, Address(in_out2, size * 2), 8);
      addq(in_out2, 8);
      cmpq(in_out2, tmp3);
      jcc(Assembler::less, L_processPartition);
    crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
            w_xtmp1, w_xtmp2, w_xtmp3,
            tmp4, tmp5,
            n_tmp6);
    addq(in_out2, 2 * size);
    subl(in_out1, 3 * size);
    jmp(L_processPartitions);

  bind(L_exit);
}
#else
void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3,
                                     XMMRegister xtmp1, XMMRegister xtmp2) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addl(tmp3, n * 256 * 8);
  }
  //    Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in_out);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addl(tmp1, tmp3);
  movq(xtmp1, Address(tmp1, 0));

  //    Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 8);
  pxor(xtmp1, xtmp2);

  //    Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 16);
  pxor(xtmp1, xtmp2);

  //    Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in_out, 24);
  andl(in_out, 0x000000FF);
  shll(in_out, 3);
  addl(in_out, tmp3);
  movq(xtmp2, Address(in_out, 0));

  psllq(xtmp2, 24);
  pxor(xtmp1, xtmp2); // result in xtmp1
  //    return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out);

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);
    // Keep the result in XMM, since the GPR is only 32 bits wide.
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2);
  }
}

void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);

  psllq(w_xtmp1, 1);
  movdl(tmp1, w_xtmp1);
  psrlq(w_xtmp1, 32);
  movdl(in_out, w_xtmp1);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2);

  psllq(w_xtmp2, 1);
  movdl(tmp1, w_xtmp2);
  psrlq(w_xtmp2, 32);
  movdl(in1, w_xtmp2);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
    xorl(tmp1, tmp1);
    xorl(tmp2, tmp2);
    movl(tmp3, in_out2);
    addl(tmp3, size);

    bind(L_processPartition);
      crc32(in_out3, Address(in_out2, 0), 4);
      crc32(tmp1, Address(in_out2, size), 4);
      crc32(tmp2, Address(in_out2, size * 2), 4);
      crc32(in_out3, Address(in_out2, 0 + 4), 4);
      crc32(tmp1, Address(in_out2, size + 4), 4);
      crc32(tmp2, Address(in_out2, size * 2 + 4), 4);
      addl(in_out2, 8);
      cmpl(in_out2, tmp3);
      jcc(Assembler::less, L_processPartition);

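        // Reuse tmp3, in_out1 and in_out2 as scratch for the recombination;
        // their live values are saved on the stack and restored below.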
        push(tmp3);
        push(in_out1);
        push(in_out2);
        tmp4 = tmp3;
        tmp5 = in_out1;
        n_tmp6 = in_out2;

      crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
            w_xtmp1, w_xtmp2, w_xtmp3,
            tmp4, tmp5,
            n_tmp6);

        pop(in_out2);
        pop(in_out1);
        pop(tmp3);

    addl(in_out2, 2 * size);
    subl(in_out1, 3 * size);
    jmp(L_processPartitions);

  bind(L_exit);
}
#endif // _LP64

#ifdef _LP64
// Algorithm 2: Pipelined usage of the CRC32 instruction.
// Input: A buffer I of L bytes.
// Output: the CRC32C value of the buffer.
// Notations:
// Write L = 24N + r, with N = floor(L/24) and
// r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation A|B|C|R, where A, B and C each
// consist of N quadwords and R consists of r bytes.
// A[j] = I[8j+7 : 8j],             j = 0, 1, ..., N-1
// B[j] = I[8N + 8j+7 : 8N + 8j],   j = 0, 1, ..., N-1
// C[j] = I[16N + 8j+7 : 16N + 8j], j = 0, 1, ..., N-1
// if r > 0, R[j] = I[24N + j],     j = 0, 1, ..., r-1
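// Illustrative C outline of the routine below (a sketch under the helper
// names used in the earlier sketches, not generated code):
//
//   uint32_t crc32c_alg2(uint32_t crc, const uint8_t* buf, size_t len) {
//     // Three passes with decreasing partition sizes; each pass consumes
//     // 3 * size bytes per iteration (see crc32c_proc_chunk above).
//     crc = proc_chunk(CRC32C_HIGH,   crc, &buf, &len);
//     crc = proc_chunk(CRC32C_MIDDLE, crc, &buf, &len);
//     crc = proc_chunk(CRC32C_LOW,    crc, &buf, &len);
//     const uint8_t* end = buf + (len & ~(size_t)7);
//     while (buf < end) { crc = crc32_u32(crc, load_u32(buf)); buf += 4; }
//     for (size_t i = 0; i < (len & 7); i++) crc = crc32_u8(crc, *buf++);
//     return crc;
//   }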
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
    assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "one pair of constants must be declared per chunk size");
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
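  // tmp1 = in1 + (in2 & ~7): end of the 8-byte-aligned prefix of the tail,
  // i.e. where the word-by-word loop stops and byte-by-byte takes over.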
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addq(tmp1, in1);

  BIND(L_wordByWord);
  cmpq(in1, tmp1);
  jcc(Assembler::greaterEqual, L_byteByByteProlog);
    crc32(in_out, Address(in1, 0), 4);
    addq(in1, 4);
    jmp(L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  BIND(L_byteByByte);
  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
    crc32(in_out, Address(in1, 0), 1);
    incq(in1);
    incl(tmp2);
    jmp(L_byteByByte);

  BIND(L_exit);
}
#else
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addl(tmp1, in1);

  BIND(L_wordByWord);
  cmpl(in1, tmp1);
  jcc(Assembler::greaterEqual, L_byteByByteProlog);
    crc32(in_out, Address(in1, 0), 4);
    addl(in1, 4);
    jmp(L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  BIND(L_byteByByte);
  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
    movb(tmp1, Address(in1, 0));
    crc32(in_out, tmp1, 1);
    incl(in1);
    incl(tmp2);
    jmp(L_byteByByte);

  BIND(L_exit);
}
#endif // _LP64
#undef BIND
#undef BLOCK_COMMENT


// Compress char[] array to byte[].
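// Scalar semantics of the compression (an illustrative C sketch, not the
// generated code; the vector path below may store partial output before
// detecting a non-Latin-1 char, but the return value contract is the same):
//
//   int compress(const jchar* src, jbyte* dst, int len) {
//     for (int i = 0; i < len; i++) {
//       if (src[i] > 0xFF) return 0;   // char does not fit in one byte
//       dst[i] = (jbyte)src[i];
//     }
//     return len;                      // success: all chars compressed
//   }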
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                         XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                         Register tmp5, Register result) {
  Label copy_chars_loop, return_length, return_zero, done;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result

  // rsi holds start addr of source char[] to be compressed
  // rdi holds start addr of destination byte[]
  // rdx holds length

  assert(len != result, "len and result registers must differ");

  // save length for return
  push(len);

  if (UseSSE42Intrinsics) {
    assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
    Label copy_32_loop, copy_16, copy_tail;

    movl(result, len);
    movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors

    // vectored compression
    andl(len, 0xfffffff0);    // vector count (in chars)
    andl(result, 0x0000000f); // tail count (in chars)
    testl(len, len);
    jccb(Assembler::zero, copy_16);

    // compress 16 chars per iter
    movdl(tmp1Reg, tmp5);
    pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
    pxor(tmp4Reg, tmp4Reg);

    lea(src, Address(src, len, Address::times_2));
    lea(dst, Address(dst, len, Address::times_1));
    negptr(len);

    bind(copy_32_loop);
    movdqu(tmp2Reg, Address(src, len, Address::times_2));     // load 1st 8 characters
    por(tmp4Reg, tmp2Reg);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
    por(tmp4Reg, tmp3Reg);
    ptest(tmp4Reg, tmp1Reg);       // check for Unicode chars in next vector
    jcc(Assembler::notZero, return_zero);
    packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
    movdqu(Address(dst, len, Address::times_1), tmp2Reg);
    addptr(len, 16);
    jcc(Assembler::notZero, copy_32_loop);

    // compress next vector of 8 chars (if any)
    bind(copy_16);
    movl(len, result);
    andl(len, 0xfffffff8);    // vector count (in chars)
    andl(result, 0x00000007); // tail count (in chars)
    testl(len, len);
    jccb(Assembler::zero, copy_tail);

    movdl(tmp1Reg, tmp5);
    pshufd(tmp1Reg, tmp1Reg, 0);   // store Unicode mask in tmp1Reg
    pxor(tmp3Reg, tmp3Reg);

    movdqu(tmp2Reg, Address(src, 0));
    ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
    jccb(Assembler::notZero, return_zero);
    packuswb(tmp2Reg, tmp3Reg);    // only LATIN1 chars; compress each to 1 byte
    movq(Address(dst, 0), tmp2Reg);
    addptr(src, 16);
    addptr(dst, 8);

    bind(copy_tail);
    movl(len, result);
  }
  // compress 1 char per iter
  testl(len, len);
  jccb(Assembler::zero, return_length);
  lea(src, Address(src, len, Address::times_2));
  lea(dst, Address(dst, len, Address::times_1));
  negptr(len);

  bind(copy_chars_loop);
  load_unsigned_short(result, Address(src, len, Address::times_2));
  testl(result, 0xff00);      // check if Unicode char
  jccb(Assembler::notZero, return_zero);
  movb(Address(dst, len, Address::times_1), result);  // LATIN1 char; compress to 1 byte
  increment(len);
  jcc(Assembler::notZero, copy_chars_loop);

  // if compression succeeded, return length
  bind(return_length);
  pop(result);
  jmpb(done);

  // if compression failed, return 0
  bind(return_zero);
  xorl(result, result);
  addptr(rsp, wordSize);  // discard saved length

  bind(done);
}

// Inflate byte[] array to char[].
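// Scalar semantics of the inflation (an illustrative C sketch, not the
// generated code):
//
//   void inflate(const jbyte* src, jchar* dst, int len) {
//     for (int i = 0; i < len; i++) {
//       dst[i] = (jchar)(src[i] & 0xFF);   // zero-extend each byte to a char
//     }
//   }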
void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
                                        XMMRegister tmp1, Register tmp2) {
  Label copy_chars_loop, done;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp2

  // rsi holds start addr of source byte[] to be inflated
  // rdi holds start addr of destination char[]
  // rdx holds length
  assert_different_registers(src, dst, len, tmp2);

  if (UseSSE42Intrinsics) {
    assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
    Label copy_8_loop, copy_bytes, copy_tail;

    movl(tmp2, len);
    andl(tmp2, 0x00000007);   // tail count (in chars)
    andl(len, 0xfffffff8);    // vector count (in chars)
    jccb(Assembler::zero, copy_tail);

    // vectored inflation
    lea(src, Address(src, len, Address::times_1));
    lea(dst, Address(dst, len, Address::times_2));
    negptr(len);

    // inflate 8 chars per iter
    bind(copy_8_loop);
    pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
    movdqu(Address(dst, len, Address::times_2), tmp1);
    addptr(len, 8);
    jcc(Assembler::notZero, copy_8_loop);

    bind(copy_tail);
    movl(len, tmp2);

    cmpl(len, 4);
    jccb(Assembler::less, copy_bytes);

    movdl(tmp1, Address(src, 0));  // load 4 byte chars
    pmovzxbw(tmp1, tmp1);
    movq(Address(dst, 0), tmp1);
    subptr(len, 4);
    addptr(src, 4);
    addptr(dst, 8);

    bind(copy_bytes);
  }
  testl(len, len);
  jccb(Assembler::zero, done);
  lea(src, Address(src, len, Address::times_1));
  lea(dst, Address(dst, len, Address::times_2));
  negptr(len);

  // inflate 1 char per iter
  bind(copy_chars_loop);
  load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
  movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
  increment(len);
  jcc(Assembler::notZero, copy_chars_loop);

  bind(done);
}


Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value);
  _masm->jcc(Assembler::equal, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
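
// Usage sketch (illustrative): code emitted between construction and
// destruction is skipped at run time whenever *flag_addr == value, e.g.
//
//   { SkipIfEqual skip(masm, &SomeDevelopFlag /* hypothetical */, false);
//     // ... emitted code here runs only when SomeDevelopFlag is true ...
//   } // the destructor binds the skip-target label here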

// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)

// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  LP64_ONLY(push(rdi);)
  LP64_ONLY(push(rsi);)
  push(rdx);
  push(rcx);
#ifdef _LP64
  push(r8);
  push(r9);
  push(r10);
  push(r11);
#endif

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

#ifdef _LP64
  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
#endif
  pop(rcx);
  pop(rdx);
  LP64_ONLY(pop(rsi);)
  LP64_ONLY(pop(rdi);)
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}

#endif