1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
  27 #define CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
  28 
  29 #include "asm/assembler.hpp"
  30 
// MacroAssembler extends Assembler with frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.
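//
// A minimal usage sketch (illustrative only; code_buffer is a hypothetical
// CodeBuffer, and "__" is the usual "#define __ masm->" shorthand used by
// HotSpot code generators):
//
//   MacroAssembler* masm = new MacroAssembler(&code_buffer);
//   __ enter();        // build a frame: stp rfp, lr; mov rfp, sp
//   __ mov(r0, 42);    // materialize a constant
//   __ leave();        // tear the frame down again
//   __ ret(lr);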
  35 
  36 class MacroAssembler: public Assembler {
  37   friend class LIR_Assembler;
  38 
  39  public:
  40   using Assembler::mov;
  41   using Assembler::movi;
  42 
  43  protected:
  44 
  45   // Support for VM calls
  46   //
  47   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  48   // may customize this version by overriding it for its purposes (e.g., to save/restore
  49   // additional registers when doing a VM call).
  50   virtual void call_VM_leaf_base(
  51     address entry_point,               // the entry point
  52     int     number_of_arguments,        // the number of arguments to pop after the call
  53     Label *retaddr = NULL
  54   );
  55 
  56   virtual void call_VM_leaf_base(
  57     address entry_point,               // the entry point
  58     int     number_of_arguments,        // the number of arguments to pop after the call
  59     Label &retaddr) {
  60     call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  61   }
  62 
  63   // This is the base routine called by the different versions of call_VM. The interpreter
  64   // may customize this version by overriding it for its purposes (e.g., to save/restore
  65   // additional registers when doing a VM call).
  66   //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  71   virtual void call_VM_base(           // returns the register containing the thread upon return
  72     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  73     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  74     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  75     address  entry_point,              // the entry point
  76     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  77     bool     check_exceptions          // whether to check for pending exceptions after return
  78   );
  79 
  80   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  81 
  82   // True if an XOR can be used to expand narrow klass references.
  83   bool use_XOR_for_compressed_class_base;
  84 
  85  public:
  86   MacroAssembler(CodeBuffer* code) : Assembler(code) {
  87     use_XOR_for_compressed_class_base
  88       = (operand_valid_for_logical_immediate(false /*is32*/,
  89                                              (uint64_t)Universe::narrow_klass_base())
  90          && ((uint64_t)Universe::narrow_klass_base()
  91              > (1UL << log2_intptr(Universe::narrow_klass_range()))));
  92   }
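
  // Note: if the narrow klass base can be encoded as a logical immediate and
  // its set bits do not overlap the narrow klass range, then ORing in the
  // base is the same as XORing it in, so encoding/decoding a compressed
  // class pointer can use a single EOR with the base (plus the alignment
  // shift) instead of materializing and adding the base. Illustrative
  // sketch only, for a zero shift:
  //
  //   decode: eor(dst, src, (uint64_t)Universe::narrow_klass_base());
  //   encode: eor(dst, src, (uint64_t)Universe::narrow_klass_base());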
  93 
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);
  99 
 100   void safepoint_poll(Label& slow_path);
 101   void safepoint_poll_acquire(Label& slow_path);
 102 
 103   // Biased locking support
 104   // lock_reg and obj_reg must be loaded up with the appropriate values.
 105   // swap_reg is killed.
 106   // tmp_reg must be supplied and must not be rscratch1 or rscratch2
 107   // Optional slow case is for implementations (interpreter and C1) which branch to
 108   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 109   // Returns offset of first potentially-faulting instruction for null
 110   // check info (currently consumed only by C1). If
 111   // swap_reg_contains_mark is true then returns -1 as it is assumed
 112   // the calling code has already passed any potential faults.
 113   int biased_locking_enter(Register lock_reg, Register obj_reg,
 114                            Register swap_reg, Register tmp_reg,
 115                            bool swap_reg_contains_mark,
 116                            Label& done, Label* slow_case = NULL,
 117                            BiasedLockingCounters* counters = NULL);
 118   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 119 
 120 
 121   // Helper functions for statistics gathering.
 122   // Unconditional atomic increment.
 123   void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
 124   void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
 125     lea(tmp1, counter_addr);
 126     atomic_incw(tmp1, tmp2, tmp3);
 127   }
 128   // Load Effective Address
 129   void lea(Register r, const Address &a) {
 130     InstructionMark im(this);
 131     code_section()->relocate(inst_mark(), a.rspec());
 132     a.lea(this, r);
 133   }
 134 
 135   void addmw(Address a, Register incr, Register scratch) {
 136     ldrw(scratch, a);
 137     addw(scratch, scratch, incr);
 138     strw(scratch, a);
 139   }
 140 
 141   // Add constant to memory word
 142   void addmw(Address a, int imm, Register scratch) {
 143     ldrw(scratch, a);
 144     if (imm > 0)
 145       addw(scratch, scratch, (unsigned)imm);
 146     else
 147       subw(scratch, scratch, (unsigned)-imm);
 148     strw(scratch, a);
 149   }
 150 
 151   void bind(Label& L) {
 152     Assembler::bind(L);
 153     code()->clear_last_insn();
 154   }
 155 
 156   void membar(Membar_mask_bits order_constraint);
 157 
 158   using Assembler::ldr;
 159   using Assembler::str;
 160 
 161   void ldr(Register Rx, const Address &adr);
 162   void ldrw(Register Rw, const Address &adr);
 163   void str(Register Rx, const Address &adr);
 164   void strw(Register Rx, const Address &adr);
 165 
 166   // Frame creation and destruction shared between JITs.
 167   void build_frame(int framesize);
 168   void remove_frame(int framesize);
 169 
 170   virtual void _call_Unimplemented(address call_site) {
 171     mov(rscratch2, call_site);
 172     haltsim();
 173   }
 174 
 175 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
 176 
 177   virtual void notify(int type);
 178 
 179   // aliases defined in AARCH64 spec
 180 
 181   template<class T>
 182   inline void cmpw(Register Rd, T imm)  { subsw(zr, Rd, imm); }
 183   // imm is limited to 12 bits.
 184   inline void cmp(Register Rd, unsigned imm)  { subs(zr, Rd, imm); }
 185   inline void cmp(Register Rd, Register Rn, unsigned imm)  { subs(Rd, Rn, imm); }
 186 
 187   inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
 188   inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
 189 
 190   void cset(Register Rd, Assembler::Condition cond) {
 191     csinc(Rd, zr, zr, ~cond);
 192   }
 193   void csetw(Register Rd, Assembler::Condition cond) {
 194     csincw(Rd, zr, zr, ~cond);
 195   }
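
  // For example, a boolean "r0 = (r1 < r2)" (signed compare) can be
  // materialized with a compare followed by cset (illustrative sketch):
  //
  //   cmp(r1, r2);
  //   cset(r0, Assembler::LT);   // r0 = 1 if r1 < r2, else 0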
 196 
 197   void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
 198     csneg(Rd, Rn, Rn, ~cond);
 199   }
 200   void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
 201     csnegw(Rd, Rn, Rn, ~cond);
 202   }
 203 
 204   inline void movw(Register Rd, Register Rn) {
 205     if (Rd == sp || Rn == sp) {
 206       addw(Rd, Rn, 0U);
 207     } else {
 208       orrw(Rd, zr, Rn);
 209     }
 210   }
 211   inline void mov(Register Rd, Register Rn) {
 212     assert(Rd != r31_sp && Rn != r31_sp, "should be");
 213     if (Rd == Rn) {
 214     } else if (Rd == sp || Rn == sp) {
 215       add(Rd, Rn, 0U);
 216     } else {
 217       orr(Rd, zr, Rn);
 218     }
 219   }
 220 
 221   inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
 222   inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }
 223 
 224   inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); }
 225   inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); }
 226 
 227   inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); }
 228   inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); }
 229 
 230   inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 231     bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 232   }
 233   inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 234     bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 235   }
 236 
 237   inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 238     bfmw(Rd, Rn, lsb, (lsb + width - 1));
 239   }
 240   inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 241     bfm(Rd, Rn, lsb , (lsb + width - 1));
 242   }
 243 
 244   inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 245     sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 246   }
 247   inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 248     sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 249   }
 250 
 251   inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 252     sbfmw(Rd, Rn, lsb, (lsb + width - 1));
 253   }
 254   inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 255     sbfm(Rd, Rn, lsb , (lsb + width - 1));
 256   }
 257 
 258   inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 259     ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 260   }
 261   inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 262     ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 263   }
 264 
 265   inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 266     ubfmw(Rd, Rn, lsb, (lsb + width - 1));
 267   }
 268   inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 269     ubfm(Rd, Rn, lsb , (lsb + width - 1));
 270   }
 271 
 272   inline void asrw(Register Rd, Register Rn, unsigned imm) {
 273     sbfmw(Rd, Rn, imm, 31);
 274   }
 275 
 276   inline void asr(Register Rd, Register Rn, unsigned imm) {
 277     sbfm(Rd, Rn, imm, 63);
 278   }
 279 
 280   inline void lslw(Register Rd, Register Rn, unsigned imm) {
 281     ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
 282   }
 283 
 284   inline void lsl(Register Rd, Register Rn, unsigned imm) {
 285     ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
 286   }
 287 
 288   inline void lsrw(Register Rd, Register Rn, unsigned imm) {
 289     ubfmw(Rd, Rn, imm, 31);
 290   }
 291 
 292   inline void lsr(Register Rd, Register Rn, unsigned imm) {
 293     ubfm(Rd, Rn, imm, 63);
 294   }
 295 
 296   inline void rorw(Register Rd, Register Rn, unsigned imm) {
 297     extrw(Rd, Rn, Rn, imm);
 298   }
 299 
 300   inline void ror(Register Rd, Register Rn, unsigned imm) {
 301     extr(Rd, Rn, Rn, imm);
 302   }
 303 
 304   inline void sxtbw(Register Rd, Register Rn) {
 305     sbfmw(Rd, Rn, 0, 7);
 306   }
 307   inline void sxthw(Register Rd, Register Rn) {
 308     sbfmw(Rd, Rn, 0, 15);
 309   }
 310   inline void sxtb(Register Rd, Register Rn) {
 311     sbfm(Rd, Rn, 0, 7);
 312   }
 313   inline void sxth(Register Rd, Register Rn) {
 314     sbfm(Rd, Rn, 0, 15);
 315   }
 316   inline void sxtw(Register Rd, Register Rn) {
 317     sbfm(Rd, Rn, 0, 31);
 318   }
 319 
 320   inline void uxtbw(Register Rd, Register Rn) {
 321     ubfmw(Rd, Rn, 0, 7);
 322   }
 323   inline void uxthw(Register Rd, Register Rn) {
 324     ubfmw(Rd, Rn, 0, 15);
 325   }
 326   inline void uxtb(Register Rd, Register Rn) {
 327     ubfm(Rd, Rn, 0, 7);
 328   }
 329   inline void uxth(Register Rd, Register Rn) {
 330     ubfm(Rd, Rn, 0, 15);
 331   }
 332   inline void uxtw(Register Rd, Register Rn) {
 333     ubfm(Rd, Rn, 0, 31);
 334   }
 335 
 336   inline void cmnw(Register Rn, Register Rm) {
 337     addsw(zr, Rn, Rm);
 338   }
 339   inline void cmn(Register Rn, Register Rm) {
 340     adds(zr, Rn, Rm);
 341   }
 342 
 343   inline void cmpw(Register Rn, Register Rm) {
 344     subsw(zr, Rn, Rm);
 345   }
 346   inline void cmp(Register Rn, Register Rm) {
 347     subs(zr, Rn, Rm);
 348   }
 349 
 350   inline void negw(Register Rd, Register Rn) {
 351     subw(Rd, zr, Rn);
 352   }
 353 
 354   inline void neg(Register Rd, Register Rn) {
 355     sub(Rd, zr, Rn);
 356   }
 357 
 358   inline void negsw(Register Rd, Register Rn) {
 359     subsw(Rd, zr, Rn);
 360   }
 361 
 362   inline void negs(Register Rd, Register Rn) {
 363     subs(Rd, zr, Rn);
 364   }
 365 
 366   inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 367     addsw(zr, Rn, Rm, kind, shift);
 368   }
 369   inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 370     adds(zr, Rn, Rm, kind, shift);
 371   }
 372 
 373   inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 374     subsw(zr, Rn, Rm, kind, shift);
 375   }
 376   inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 377     subs(zr, Rn, Rm, kind, shift);
 378   }
 379 
 380   inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 381     subw(Rd, zr, Rn, kind, shift);
 382   }
 383 
 384   inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 385     sub(Rd, zr, Rn, kind, shift);
 386   }
 387 
 388   inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 389     subsw(Rd, zr, Rn, kind, shift);
 390   }
 391 
 392   inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 393     subs(Rd, zr, Rn, kind, shift);
 394   }
 395 
 396   inline void mnegw(Register Rd, Register Rn, Register Rm) {
 397     msubw(Rd, Rn, Rm, zr);
 398   }
 399   inline void mneg(Register Rd, Register Rn, Register Rm) {
 400     msub(Rd, Rn, Rm, zr);
 401   }
 402 
 403   inline void mulw(Register Rd, Register Rn, Register Rm) {
 404     maddw(Rd, Rn, Rm, zr);
 405   }
 406   inline void mul(Register Rd, Register Rn, Register Rm) {
 407     madd(Rd, Rn, Rm, zr);
 408   }
 409 
 410   inline void smnegl(Register Rd, Register Rn, Register Rm) {
 411     smsubl(Rd, Rn, Rm, zr);
 412   }
 413   inline void smull(Register Rd, Register Rn, Register Rm) {
 414     smaddl(Rd, Rn, Rm, zr);
 415   }
 416 
 417   inline void umnegl(Register Rd, Register Rn, Register Rm) {
 418     umsubl(Rd, Rn, Rm, zr);
 419   }
 420   inline void umull(Register Rd, Register Rn, Register Rm) {
 421     umaddl(Rd, Rn, Rm, zr);
 422   }
 423 
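  // The WRAP forms below insert a leading nop before a multiply-accumulate
  // whenever the CPU reports the A53MAC feature and the accumulator is not
  // zr. This works around a Cortex-A53 multiply-accumulate erratum by
  // keeping the madd/msub from immediately following an instruction that
  // could trigger it (descriptive note; see VM_Version for the feature bit).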
 424 #define WRAP(INSN)                                                            \
 425   void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {             \
 426     if ((VM_Version::features() & VM_Version::CPU_A53MAC) && Ra != zr)        \
 427       nop();                                                                  \
 428     Assembler::INSN(Rd, Rn, Rm, Ra);                                          \
 429   }
 430 
 431   WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
 432   WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
 433 #undef WRAP
 434 
 435 
 436   // macro assembly operations needed for aarch64
 437 
 438   // first two private routines for loading 32 bit or 64 bit constants
 439 private:
 440 
 441   void mov_immediate64(Register dst, u_int64_t imm64);
 442   void mov_immediate32(Register dst, u_int32_t imm32);
 443 
 444   int push(unsigned int bitset, Register stack);
 445   int pop(unsigned int bitset, Register stack);
 446 
 447   void mov(Register dst, Address a);
 448 
 449 public:
 450   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
 451   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
 452 
 453   // Push and pop everything that might be clobbered by a native
 454   // runtime call except rscratch1 and rscratch2.  (They are always
 455   // scratch, so we don't have to protect them.)  Only save the lower
 456   // 64 bits of each vector register.
 457   void push_call_clobbered_registers();
 458   void pop_call_clobbered_registers();
 459 
  // mov instructions for loading absolute addresses and 32- or
  // 64-bit integers
 462 
 463   inline void mov(Register dst, address addr)
 464   {
 465     mov_immediate64(dst, (u_int64_t)addr);
 466   }
 467 
 468   inline void mov(Register dst, u_int64_t imm64)
 469   {
 470     mov_immediate64(dst, imm64);
 471   }
 472 
 473   inline void movw(Register dst, u_int32_t imm32)
 474   {
 475     mov_immediate32(dst, imm32);
 476   }
 477 
 478   inline void mov(Register dst, long l)
 479   {
 480     mov(dst, (u_int64_t)l);
 481   }
 482 
 483   inline void mov(Register dst, int i)
 484   {
 485     mov(dst, (long)i);
 486   }
 487 
 488   void mov(Register dst, RegisterOrConstant src) {
 489     if (src.is_register())
 490       mov(dst, src.as_register());
 491     else
 492       mov(dst, src.as_constant());
 493   }
 494 
 495   void movptr(Register r, uintptr_t imm64);
 496 
 497   void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
 498 
 499   void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
 500     orr(Vd, T, Vn, Vn);
 501   }
 502 
 503 public:
 504 
 505   // Generalized Test Bit And Branch, including a "far" variety which
 506   // spans more than 32KiB.
 507   void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
 508     assert(cond == EQ || cond == NE, "must be");
 509 
 510     if (far)
 511       cond = ~cond;
 512 
 513     void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
 514     if (cond == Assembler::EQ)
 515       branch = &Assembler::tbz;
 516     else
 517       branch = &Assembler::tbnz;
 518 
 519     if (far) {
 520       Label L;
 521       (this->*branch)(Rt, bitpos, L);
 522       b(dest);
 523       bind(L);
 524     } else {
 525       (this->*branch)(Rt, bitpos, dest);
 526     }
 527   }
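
  // Typical use (illustrative sketch; "slow" is a hypothetical Label):
  // branch to "slow" if bit 2 of r0 is set, even when "slow" may be out
  // of tbnz range:
  //
  //   tbr(Assembler::NE, r0, 2, slow, /*far*/ true);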
 528 
 529   // macro instructions for accessing and updating floating point
 530   // status register
 531   //
 532   // FPSR : op1 == 011
 533   //        CRn == 0100
 534   //        CRm == 0100
 535   //        op2 == 001
 536 
 537   inline void get_fpsr(Register reg)
 538   {
 539     mrs(0b11, 0b0100, 0b0100, 0b001, reg);
 540   }
 541 
 542   inline void set_fpsr(Register reg)
 543   {
 544     msr(0b011, 0b0100, 0b0100, 0b001, reg);
 545   }
 546 
 547   inline void clear_fpsr()
 548   {
 549     msr(0b011, 0b0100, 0b0100, 0b001, zr);
 550   }
 551 
 552   // DCZID_EL0: op1 == 011
 553   //            CRn == 0000
 554   //            CRm == 0000
 555   //            op2 == 111
 556   inline void get_dczid_el0(Register reg)
 557   {
 558     mrs(0b011, 0b0000, 0b0000, 0b111, reg);
 559   }
 560 
 561   // CTR_EL0:   op1 == 011
 562   //            CRn == 0000
 563   //            CRm == 0000
 564   //            op2 == 001
 565   inline void get_ctr_el0(Register reg)
 566   {
 567     mrs(0b011, 0b0000, 0b0000, 0b001, reg);
 568   }
 569 
 570   // idiv variant which deals with MINLONG as dividend and -1 as divisor
 571   int corrected_idivl(Register result, Register ra, Register rb,
 572                       bool want_remainder, Register tmp = rscratch1);
 573   int corrected_idivq(Register result, Register ra, Register rb,
 574                       bool want_remainder, Register tmp = rscratch1);
 575 
 576   // Support for NULL-checks
 577   //
 578   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 579   // If the accessed location is M[reg + offset] and the offset is known, provide the
 580   // offset. No explicit code generation is needed if the offset is within a certain
 581   // range (0 <= offset <= page_size).
 582 
 583   virtual void null_check(Register reg, int offset = -1);
 584   static bool needs_explicit_null_check(intptr_t offset);
 585 
 586   static address target_addr_for_insn(address insn_addr, unsigned insn);
 587   static address target_addr_for_insn(address insn_addr) {
 588     unsigned insn = *(unsigned*)insn_addr;
 589     return target_addr_for_insn(insn_addr, insn);
 590   }
 591 
 592   // Required platform-specific helpers for Label::patch_instructions.
 593   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 594   static int pd_patch_instruction_size(address branch, address target);
 595   static void pd_patch_instruction(address branch, address target) {
 596     pd_patch_instruction_size(branch, target);
 597   }
 598   static address pd_call_destination(address branch) {
 599     return target_addr_for_insn(branch);
 600   }
 601 #ifndef PRODUCT
 602   static void pd_print_patched_instruction(address branch);
 603 #endif
 604 
 605   static int patch_oop(address insn_addr, address o);
 606   static int patch_narrow_klass(address insn_addr, narrowKlass n);
 607 
 608   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
 609 
  // The following methods return the offset of the appropriate move instruction
 611 
 612   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 613   int load_unsigned_byte(Register dst, Address src);
 614   int load_unsigned_short(Register dst, Address src);
 615 
 616   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 617   int load_signed_byte(Register dst, Address src);
 618   int load_signed_short(Register dst, Address src);
 619 
 620   int load_signed_byte32(Register dst, Address src);
 621   int load_signed_short32(Register dst, Address src);
 622 
 623   // Support for sign-extension (hi:lo = extend_sign(lo))
 624   void extend_sign(Register hi, Register lo);
 625 
 626   // Load and store values by size and signed-ness
 627   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 628   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 629 
 630   // Support for inc/dec with optimal instruction selection depending on value
 631 
  // x86_64 aliases an unqualified register/address increment and
  // decrement to call incrementq and decrementq, but also supports
  // explicitly sized calls to incrementq/decrementq or
  // incrementl/decrementl.

  // For aarch64 the proper convention is to use increment/decrement
  // for 64 bit operations and incrementw/decrementw for 32 bit
  // operations. So when porting x86_64 code we can leave calls to
  // increment/decrement as is, replace incrementq/decrementq with
  // increment/decrement and replace incrementl/decrementl with
  // incrementw/decrementw.

  // N.B. increment/decrement calls with an Address destination need
  // a scratch register to load the value to be incremented.
  // increment/decrement calls which add or subtract a constant value
  // greater than 2^12 need a second scratch register to hold the
  // constant. So a register increment/decrement may clobber rscratch2,
  // and an address increment/decrement may clobber both rscratch1 and
  // rscratch2 (see the usage sketch after the declarations below).
 651 
 652   void decrementw(Address dst, int value = 1);
 653   void decrementw(Register reg, int value = 1);
 654 
 655   void decrement(Register reg, int value = 1);
 656   void decrement(Address dst, int value = 1);
 657 
 658   void incrementw(Address dst, int value = 1);
 659   void incrementw(Register reg, int value = 1);
 660 
 661   void increment(Register reg, int value = 1);
 662   void increment(Address dst, int value = 1);
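
  // Usage sketch (illustrative only; counter_offset is a hypothetical
  // frame offset):
  //
  //   increment(r19, 8);                         // r19 += 8 (64 bit)
  //   decrementw(Address(rfp, counter_offset));  // 32-bit in-memory decrement;
  //                                              // may clobber rscratch1/rscratch2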
 663 
 664 
 665   // Alignment
 666   void align(int modulus);
 667 
 668   // Stack frame creation/removal
 669   void enter()
 670   {
 671     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
 672     mov(rfp, sp);
 673   }
 674   void leave()
 675   {
 676     mov(sp, rfp);
 677     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
 678   }
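
  // enter() pushes the caller's rfp and lr and points rfp at the saved
  // pair, producing the usual AArch64 frame chain (lower addresses at
  // the top):
  //
  //   rfp, sp -> [ saved rfp ]
  //              [ saved lr  ]
  //              [ caller's frame ... ]
  //
  // leave() undoes this by restoring sp from rfp and popping the pair.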
 679 
  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 681   // The pointer will be loaded into the thread register.
 682   void get_thread(Register thread);
 683 
 684 
 685   // Support for VM calls
 686   //
 687   // It is imperative that all calls into the VM are handled via the call_VM macros.
 688   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 689   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 690 
 691 
 692   void call_VM(Register oop_result,
 693                address entry_point,
 694                bool check_exceptions = true);
 695   void call_VM(Register oop_result,
 696                address entry_point,
 697                Register arg_1,
 698                bool check_exceptions = true);
 699   void call_VM(Register oop_result,
 700                address entry_point,
 701                Register arg_1, Register arg_2,
 702                bool check_exceptions = true);
 703   void call_VM(Register oop_result,
 704                address entry_point,
 705                Register arg_1, Register arg_2, Register arg_3,
 706                bool check_exceptions = true);
 707 
 708   // Overloadings with last_Java_sp
 709   void call_VM(Register oop_result,
 710                Register last_java_sp,
 711                address entry_point,
 712                int number_of_arguments = 0,
 713                bool check_exceptions = true);
 714   void call_VM(Register oop_result,
 715                Register last_java_sp,
 716                address entry_point,
 717                Register arg_1, bool
 718                check_exceptions = true);
 719   void call_VM(Register oop_result,
 720                Register last_java_sp,
 721                address entry_point,
 722                Register arg_1, Register arg_2,
 723                bool check_exceptions = true);
 724   void call_VM(Register oop_result,
 725                Register last_java_sp,
 726                address entry_point,
 727                Register arg_1, Register arg_2, Register arg_3,
 728                bool check_exceptions = true);
 729 
 730   void get_vm_result  (Register oop_result, Register thread);
 731   void get_vm_result_2(Register metadata_result, Register thread);
 732 
 733   // These always tightly bind to MacroAssembler::call_VM_base
 734   // bypassing the virtual implementation
 735   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 736   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 737   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 738   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 739   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 740 
 741   void call_VM_leaf(address entry_point,
 742                     int number_of_arguments = 0);
 743   void call_VM_leaf(address entry_point,
 744                     Register arg_1);
 745   void call_VM_leaf(address entry_point,
 746                     Register arg_1, Register arg_2);
 747   void call_VM_leaf(address entry_point,
 748                     Register arg_1, Register arg_2, Register arg_3);
 749 
 750   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 751   // bypassing the virtual implementation
 752   void super_call_VM_leaf(address entry_point);
 753   void super_call_VM_leaf(address entry_point, Register arg_1);
 754   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 755   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 756   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 757 
 758   // last Java Frame (fills frame anchor)
 759   void set_last_Java_frame(Register last_java_sp,
 760                            Register last_java_fp,
 761                            address last_java_pc,
 762                            Register scratch);
 763 
 764   void set_last_Java_frame(Register last_java_sp,
 765                            Register last_java_fp,
 766                            Label &last_java_pc,
 767                            Register scratch);
 768 
 769   void set_last_Java_frame(Register last_java_sp,
 770                            Register last_java_fp,
 771                            Register last_java_pc,
 772                            Register scratch);
 773 
 774   void reset_last_Java_frame(Register thread);
 775 
 776   // thread in the default location (rthread)
 777   void reset_last_Java_frame(bool clear_fp);
 778 
 779   // Stores
 780   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
 781   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
 782 
 783   void resolve_jobject(Register value, Register thread, Register tmp);
 784 
 785   // oop manipulations
 786   void load_klass(Register dst, Register src);
 787   void store_klass(Register dst, Register src);
 788   void cmp_klass(Register oop, Register trial_klass, Register tmp);
 789 
 790   void resolve_oop_handle(Register result, Register tmp = r5);
 791   void load_mirror(Register dst, Register method, Register tmp = r5);
 792 
 793   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 794                       Register tmp1, Register tmp_thread);
 795 
 796   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 797                        Register tmp1, Register tmp_thread);
 798 
 799   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 800                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 801 
 802   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 803                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 804   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 805                       Register tmp_thread = noreg, DecoratorSet decorators = 0);
 806 
 807   // currently unimplemented
 808   // Used for storing NULL. All other oop constants should be
 809   // stored using routines that take a jobject.
 810   void store_heap_oop_null(Address dst);
 811 
 812   void load_prototype_header(Register dst, Register src);
 813 
 814   void store_klass_gap(Register dst, Register src);
 815 
 816   // This dummy is to prevent a call to store_heap_oop from
 817   // converting a zero (like NULL) into a Register by giving
 818   // the compiler two choices it can't resolve
 819 
 820   void store_heap_oop(Address dst, void* dummy);
 821 
 822   void encode_heap_oop(Register d, Register s);
 823   void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
 824   void decode_heap_oop(Register d, Register s);
 825   void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
 826   void encode_heap_oop_not_null(Register r);
 827   void decode_heap_oop_not_null(Register r);
 828   void encode_heap_oop_not_null(Register dst, Register src);
 829   void decode_heap_oop_not_null(Register dst, Register src);
 830 
 831   void set_narrow_oop(Register dst, jobject obj);
 832 
 833   void encode_klass_not_null(Register r);
 834   void decode_klass_not_null(Register r);
 835   void encode_klass_not_null(Register dst, Register src);
 836   void decode_klass_not_null(Register dst, Register src);
 837 
 838   void set_narrow_klass(Register dst, Klass* k);
 839 
 840   // if heap base register is used - reinit it with the correct value
 841   void reinit_heapbase();
 842 
 843   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 844 
 845   void push_CPU_state(bool save_vectors = false);
  void pop_CPU_state(bool restore_vectors = false);
 847 
 848   // Round up to a power of two
 849   void round_to(Register reg, int modulus);
 850 
 851   // allocation
 852   void eden_allocate(
 853     Register obj,                      // result: pointer to object after successful allocation
 854     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 855     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 856     Register t1,                       // temp register
 857     Label&   slow_case                 // continuation point if fast allocation fails
 858   );
 859   void tlab_allocate(
 860     Register obj,                      // result: pointer to object after successful allocation
 861     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 862     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 863     Register t1,                       // temp register
 864     Register t2,                       // temp register
 865     Label&   slow_case                 // continuation point if fast allocation fails
 866   );
 867   void zero_memory(Register addr, Register len, Register t1);
 868   void verify_tlab();
 869 
 870   // interface method calling
 871   void lookup_interface_method(Register recv_klass,
 872                                Register intf_klass,
 873                                RegisterOrConstant itable_index,
 874                                Register method_result,
 875                                Register scan_temp,
 876                                Label& no_such_interface,
                               bool return_method = true);
 878 
 879   // virtual method calling
 880   // n.b. x86 allows RegisterOrConstant for vtable_index
 881   void lookup_virtual_method(Register recv_klass,
 882                              RegisterOrConstant vtable_index,
 883                              Register method_result);
 884 
 885   // Test sub_klass against super_klass, with fast and slow paths.
 886 
 887   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 888   // One of the three labels can be NULL, meaning take the fall-through.
 889   // If super_check_offset is -1, the value is loaded up from super_klass.
 890   // No registers are killed, except temp_reg.
 891   void check_klass_subtype_fast_path(Register sub_klass,
 892                                      Register super_klass,
 893                                      Register temp_reg,
 894                                      Label* L_success,
 895                                      Label* L_failure,
 896                                      Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 898 
 899   // The rest of the type check; must be wired to a corresponding fast path.
 900   // It does not repeat the fast path logic, so don't use it standalone.
 901   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 902   // Updates the sub's secondary super cache as necessary.
 903   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 904   void check_klass_subtype_slow_path(Register sub_klass,
 905                                      Register super_klass,
 906                                      Register temp_reg,
 907                                      Register temp2_reg,
 908                                      Label* L_success,
 909                                      Label* L_failure,
 910                                      bool set_cond_codes = false);
 911 
 912   // Simplified, combined version, good for typical uses.
 913   // Falls through on failure.
 914   void check_klass_subtype(Register sub_klass,
 915                            Register super_klass,
 916                            Register temp_reg,
 917                            Label& L_success);
 918 
 919   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 920 
 921 
 922   // Debugging
 923 
 924   // only if +VerifyOops
 925   void verify_oop(Register reg, const char* s = "broken oop");
 926   void verify_oop_addr(Address addr, const char * s = "broken oop addr");
 927 
  // TODO: verify method and klass metadata (compare against vptr?)
 929   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 930   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 931 
 932 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 933 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 934 
 935   // only if +VerifyFPU
 936   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
 937 
 938   // prints msg, dumps registers and stops execution
 939   void stop(const char* msg);
 940 
 941   // prints msg and continues
 942   void warn(const char* msg);
 943 
 944   static void debug64(char* msg, int64_t pc, int64_t regs[]);
 945 
 946   void untested()                                { stop("untested"); }
 947 
 948   void unimplemented(const char* what = "");
 949 
 950   void should_not_reach_here()                   { stop("should not reach here"); }
 951 
 952   // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    sub(rscratch2, sp, offset);
    str(zr, Address(rscratch2));
  }
 959 
 960   // Writes to stack successive pages until offset reached to check for
 961   // stack overflow + shadow pages.  Also, clobbers tmp
 962   void bang_stack_size(Register size, Register tmp);
 963 
 964   // Check for reserved stack access in method being exited (for JIT)
 965   void reserved_stack_check();
 966 
 967   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
 968                                                 Register tmp,
 969                                                 int offset);
 970 
 971   // Support for serializing memory accesses between threads
 972   void serialize_memory(Register thread, Register tmp);
 973 
 974   // Arithmetics
 975 
 976   void addptr(const Address &dst, int32_t src);
 977   void cmpptr(Register src1, Address src2);
 978 
 979   void cmpoop(Register obj1, Register obj2);
 980 
 981   // Various forms of CAS
 982 
  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
                          Label &succeed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &succeed, Label *fail);

  void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                  Label &succeed, Label *fail);
 990 
 991   void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
 992   void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
 993   void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
 994   void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);
 995 
 996   void atomic_xchg(Register prev, Register newv, Register addr);
 997   void atomic_xchgw(Register prev, Register newv, Register addr);
 998   void atomic_xchgal(Register prev, Register newv, Register addr);
 999   void atomic_xchgalw(Register prev, Register newv, Register addr);
1000 
1001   void orptr(Address adr, RegisterOrConstant src) {
1002     ldr(rscratch1, adr);
1003     if (src.is_register())
1004       orr(rscratch1, rscratch1, src.as_register());
1005     else
1006       orr(rscratch1, rscratch1, src.as_constant());
1007     str(rscratch1, adr);
1008   }
1009 
1010   // A generic CAS; success or failure is in the EQ flag.
1011   // Clobbers rscratch1
1012   void cmpxchg(Register addr, Register expected, Register new_val,
1013                enum operand_size size,
1014                bool acquire, bool release, bool weak,
1015                Register result);
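
  // For example, a sequentially consistent 64-bit CAS leaving the old value
  // in "prev" (illustrative sketch; addr/expected/new_val/prev are
  // hypothetical registers):
  //
  //   cmpxchg(addr, expected, new_val, Assembler::xword,
  //           /*acquire*/ true, /*release*/ true, /*weak*/ false, prev);
  //   // EQ is set on success; "prev" holds the value found at *addr.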
1016 
1017   // Calls
1018 
1019   address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);
1020 
1021   static bool far_branches() {
1022     return ReservedCodeCacheSize > branch_range || UseAOT;
1023   }
1024 
1025   // Jumps that can reach anywhere in the code cache.
1026   // Trashes tmp.
1027   void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
1028   void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
1029 
1030   static int far_branch_size() {
1031     if (far_branches()) {
1032       return 3 * 4;  // adrp, add, br
1033     } else {
1034       return 4;
1035     }
1036   }
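
  // When far_branches() is true a branch is emitted as an adrp/add of the
  // target into tmp followed by an indirect branch, because a direct b/bl
  // only reaches +/-128 MB; otherwise a single direct branch suffices,
  // hence the 3 * 4 vs 4 byte sizes above.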
1037 
1038   // Emit the CompiledIC call idiom
1039   address ic_call(address entry, jint method_index = 0);
1040 
1041 public:
1042 
1043   // Data
1044 
1045   void mov_metadata(Register dst, Metadata* obj);
1046   Address allocate_metadata_address(Metadata* obj);
1047   Address constant_oop_address(jobject obj);
1048 
1049   void movoop(Register dst, jobject obj, bool immediate = false);
1050 
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1052   void kernel_crc32(Register crc, Register buf, Register len,
1053         Register table0, Register table1, Register table2, Register table3,
1054         Register tmp, Register tmp2, Register tmp3);
  // CRC32 code for java.util.zip.CRC32C::updateBytes() intrinsic.
1056   void kernel_crc32c(Register crc, Register buf, Register len,
1057         Register table0, Register table1, Register table2, Register table3,
1058         Register tmp, Register tmp2, Register tmp3);
1059 
1060   // Stack push and pop individual 64 bit registers
1061   void push(Register src);
1062   void pop(Register dst);
1063 
1064   // push all registers onto the stack
1065   void pusha();
1066   void popa();
1067 
1068   void repne_scan(Register addr, Register value, Register count,
1069                   Register scratch);
1070   void repne_scanw(Register addr, Register value, Register count,
1071                    Register scratch);
1072 
1073   typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
1074   typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);
1075 
1076   // If a constant does not fit in an immediate field, generate some
1077   // number of MOV instructions and then perform the operation
1078   void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
1079                              add_sub_imm_insn insn1,
1080                              add_sub_reg_insn insn2);
  // Separate version which sets the flags
1082   void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
1083                              add_sub_imm_insn insn1,
1084                              add_sub_reg_insn insn2);
1085 
1086 #define WRAP(INSN)                                                      \
1087   void INSN(Register Rd, Register Rn, unsigned imm) {                   \
1088     wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
1089   }                                                                     \
1090                                                                         \
1091   void INSN(Register Rd, Register Rn, Register Rm,                      \
1092              enum shift_kind kind, unsigned shift = 0) {                \
1093     Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
1094   }                                                                     \
1095                                                                         \
1096   void INSN(Register Rd, Register Rn, Register Rm) {                    \
1097     Assembler::INSN(Rd, Rn, Rm);                                        \
1098   }                                                                     \
1099                                                                         \
1100   void INSN(Register Rd, Register Rn, Register Rm,                      \
1101            ext::operation option, int amount = 0) {                     \
1102     Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
1103   }
1104 
1105   WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)
1106 
1107 #undef WRAP
1108 #define WRAP(INSN)                                                      \
1109   void INSN(Register Rd, Register Rn, unsigned imm) {                   \
1110     wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
1111   }                                                                     \
1112                                                                         \
1113   void INSN(Register Rd, Register Rn, Register Rm,                      \
1114              enum shift_kind kind, unsigned shift = 0) {                \
1115     Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
1116   }                                                                     \
1117                                                                         \
1118   void INSN(Register Rd, Register Rn, Register Rm) {                    \
1119     Assembler::INSN(Rd, Rn, Rm);                                        \
1120   }                                                                     \
1121                                                                         \
1122   void INSN(Register Rd, Register Rn, Register Rm,                      \
1123            ext::operation option, int amount = 0) {                     \
1124     Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
1125   }
1126 
1127   WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)
1128 
1129   void add(Register Rd, Register Rn, RegisterOrConstant increment);
1130   void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1131   void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1132   void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1133 
1134   void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);
1135 
1136   void tableswitch(Register index, jint lowbound, jint highbound,
1137                    Label &jumptable, Label &jumptable_end, int stride = 1) {
1138     adr(rscratch1, jumptable);
1139     subsw(rscratch2, index, lowbound);
1140     subsw(zr, rscratch2, highbound - lowbound);
1141     br(Assembler::HS, jumptable_end);
1142     add(rscratch1, rscratch1, rscratch2,
1143         ext::sxtw, exact_log2(stride * Assembler::instruction_size));
1144     br(rscratch1);
1145   }
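
  // Sketch of intended use (illustrative only): dispatch on an index in
  // [lowbound, highbound) through a table of equally sized entries emitted
  // between "table" and "table_end", falling through to "table_end" when
  // the index is out of range:
  //
  //   Label table, table_end;
  //   tableswitch(r2, 0, 16, table, table_end);
  //   bind(table);
  //   // ... one instruction (or "stride" instructions) per key ...
  //   bind(table_end);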
1146 
1147   // Form an address from base + offset in Rd.  Rd may or may not
1148   // actually be used: you must use the Address that is returned.  It
1149   // is up to you to ensure that the shift provided matches the size
1150   // of your data.
1151   Address form_address(Register Rd, Register base, long byte_offset, int shift);
1152 
1153   // Return true iff an address is within the 48-bit AArch64 address
1154   // space.
1155   bool is_valid_AArch64_address(address a) {
1156     return ((uint64_t)a >> 48) == 0;
1157   }
1158 
1159   // Load the base of the cardtable byte map into reg.
1160   void load_byte_map_base(Register reg);
1161 
  // Prolog generator routines to support switching between x86 code
  // and generated ARM code

  // Routine to generate an x86 prolog for a stub function which
  // bootstraps into the generated ARM code which directly follows the
  // stub.
  //
1169 
1170   public:
1171   // enum used for aarch64--x86 linkage to define return type of x86 function
1172   enum ret_type { ret_type_void, ret_type_integral, ret_type_float, ret_type_double};
1173 
1174 #ifdef BUILTIN_SIM
1175   void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, address *prolog_ptr = NULL);
1176 #else
1177   void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type) { }
1178 #endif
1179 
  // Special version of call_VM_leaf_base needed for the aarch64
  // simulator, where we need to specify both the gp and fp arg counts
  // and the return type so that the linkage routine from aarch64 to
  // x86 and back knows which aarch64 registers to copy to x86
  // registers and which x86 result register to copy back to an
  // aarch64 register.
1185 
1186   void call_VM_leaf_base1(
1187     address  entry_point,             // the entry point
1188     int      number_of_gp_arguments,  // the number of gp reg arguments to pass
1189     int      number_of_fp_arguments,  // the number of fp reg arguments to pass
1190     ret_type type,                    // the return type for the call
1191     Label*   retaddr = NULL
1192   );
1193 
1194   void ldr_constant(Register dest, const Address &const_addr) {
1195     if (NearCpool) {
1196       ldr(dest, const_addr);
1197     } else {
1198       unsigned long offset;
1199       adrp(dest, InternalAddress(const_addr.target()), offset);
1200       ldr(dest, Address(dest, offset));
1201     }
1202   }
1203 
1204   address read_polling_page(Register r, address page, relocInfo::relocType rtype);
1205   address read_polling_page(Register r, relocInfo::relocType rtype);
1206   void get_polling_page(Register dest, address page, relocInfo::relocType rtype);
1207 
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1209   void update_byte_crc32(Register crc, Register val, Register table);
1210   void update_word_crc32(Register crc, Register v, Register tmp,
1211         Register table0, Register table1, Register table2, Register table3,
1212         bool upper = false);
1213 
1214   void string_compare(Register str1, Register str2,
1215                       Register cnt1, Register cnt2, Register result,
1216                       Register tmp1, Register tmp2, FloatRegister vtmp1,
1217                       FloatRegister vtmp2, FloatRegister vtmp3, int ae);
1218 
1219   void has_negatives(Register ary1, Register len, Register result);
1220 
1221   void arrays_equals(Register a1, Register a2, Register result, Register cnt1,
1222                      Register tmp1, Register tmp2, Register tmp3, int elem_size);
1223 
1224   void string_equals(Register a1, Register a2, Register result, Register cnt1,
1225                      int elem_size);
1226 
1227   void fill_words(Register base, Register cnt, Register value);
1228   void zero_words(Register base, u_int64_t cnt);
1229   void zero_words(Register ptr, Register cnt);
1230   void zero_dcache_blocks(Register base, Register cnt);
1231 
1232   static const int zero_words_block_size;
1233 
1234   void byte_array_inflate(Register src, Register dst, Register len,
1235                           FloatRegister vtmp1, FloatRegister vtmp2,
1236                           FloatRegister vtmp3, Register tmp4);
1237 
1238   void char_array_compress(Register src, Register dst, Register len,
1239                            FloatRegister tmp1Reg, FloatRegister tmp2Reg,
1240                            FloatRegister tmp3Reg, FloatRegister tmp4Reg,
1241                            Register result);
1242 
1243   void encode_iso_array(Register src, Register dst,
1244                         Register len, Register result,
1245                         FloatRegister Vtmp1, FloatRegister Vtmp2,
1246                         FloatRegister Vtmp3, FloatRegister Vtmp4);
1247   void string_indexof(Register str1, Register str2,
1248                       Register cnt1, Register cnt2,
1249                       Register tmp1, Register tmp2,
1250                       Register tmp3, Register tmp4,
1251                       Register tmp5, Register tmp6,
1252                       int int_cnt1, Register result, int ae);
1253   void string_indexof_char(Register str1, Register cnt1,
1254                            Register ch, Register result,
1255                            Register tmp1, Register tmp2, Register tmp3);
1256   void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2,
1257                 FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5,
1258                 FloatRegister tmpC1, FloatRegister tmpC2, FloatRegister tmpC3,
1259                 FloatRegister tmpC4, Register tmp1, Register tmp2,
1260                 Register tmp3, Register tmp4, Register tmp5);
1261   void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi,
1262       address pio2, address dsin_coef, address dcos_coef);
1263  private:
1264   // begin trigonometric functions support block
1265   void generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2);
1266   void generate__kernel_rem_pio2(address two_over_pi, address pio2);
1267   void generate_kernel_sin(FloatRegister x, bool iyIsOne, address dsin_coef);
1268   void generate_kernel_cos(FloatRegister x, address dcos_coef);
1269   // end trigonometric functions support block
1270   void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
1271                        Register src1, Register src2);
1272   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
1273     add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
1274   }
1275   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1276                              Register y, Register y_idx, Register z,
1277                              Register carry, Register product,
1278                              Register idx, Register kdx);
1279   void multiply_128_x_128_loop(Register y, Register z,
1280                                Register carry, Register carry2,
1281                                Register idx, Register jdx,
1282                                Register yz_idx1, Register yz_idx2,
1283                                Register tmp, Register tmp3, Register tmp4,
1284                                Register tmp7, Register product_hi);
1285   void kernel_crc32_using_crc32(Register crc, Register buf,
1286         Register len, Register tmp0, Register tmp1, Register tmp2,
1287         Register tmp3);
1288   void kernel_crc32c_using_crc32c(Register crc, Register buf,
1289         Register len, Register tmp0, Register tmp1, Register tmp2,
1290         Register tmp3);
1291 public:
1292   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
1293                        Register zlen, Register tmp1, Register tmp2, Register tmp3,
1294                        Register tmp4, Register tmp5, Register tmp6, Register tmp7);
1295   void mul_add(Register out, Register in, Register offs, Register len, Register k);
1296   // ISB may be needed because of a safepoint
1297   void maybe_isb() { isb(); }
1298 
1299 private:
1300   // Return the effective address r + (r1 << ext) + offset.
1301   // Uses rscratch2.
1302   Address offsetted_address(Register r, Register r1, Address::extend ext,
1303                             int offset, int size);
1304 
1305 private:
  // Returns an address on the stack which is reachable with a ldr/str of the given size.
  // Uses rscratch2 if the address is not directly reachable.
1308   Address spill_address(int size, int offset, Register tmp=rscratch2);
1309 
1310   bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;
1311 
1312   // Check whether two loads/stores can be merged into ldp/stp.
1313   bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;
1314 
1315   // Merge current load/store with previous load/store into ldp/stp.
1316   void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);
1317 
1318   // Try to merge two loads/stores into ldp/stp. If success, returns true else false.
1319   bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);
1320 
1321 public:
1322   void spill(Register Rx, bool is64, int offset) {
1323     if (is64) {
1324       str(Rx, spill_address(8, offset));
1325     } else {
1326       strw(Rx, spill_address(4, offset));
1327     }
1328   }
1329   void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
1330     str(Vx, T, spill_address(1 << (int)T, offset));
1331   }
1332   void unspill(Register Rx, bool is64, int offset) {
1333     if (is64) {
1334       ldr(Rx, spill_address(8, offset));
1335     } else {
1336       ldrw(Rx, spill_address(4, offset));
1337     }
1338   }
1339   void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
1340     ldr(Vx, T, spill_address(1 << (int)T, offset));
1341   }
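
  // Spill/unspill sketch (illustrative only; "offset" is a hypothetical
  // stack offset): save and restore r19, letting spill_address fall back
  // to rscratch2 if the offset is not directly reachable:
  //
  //   spill(r19, /*is64*/ true, offset);
  //   // ...
  //   unspill(r19, /*is64*/ true, offset);
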
1342   void spill_copy128(int src_offset, int dst_offset,
1343                      Register tmp1=rscratch1, Register tmp2=rscratch2) {
1344     if (src_offset < 512 && (src_offset & 7) == 0 &&
1345         dst_offset < 512 && (dst_offset & 7) == 0) {
1346       ldp(tmp1, tmp2, Address(sp, src_offset));
1347       stp(tmp1, tmp2, Address(sp, dst_offset));
1348     } else {
1349       unspill(tmp1, true, src_offset);
1350       spill(tmp1, true, dst_offset);
1351       unspill(tmp1, true, src_offset+8);
1352       spill(tmp1, true, dst_offset+8);
1353     }
1354   }
1355 };
1356 
1357 #ifdef ASSERT
1358 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
1359 #endif
1360 
1361 /**
1362  * class SkipIfEqual:
1363  *
1364  * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
1366  * automatic destruction at the end of a scope block, depending on the value of
1367  * the flag passed to the constructor, which will be checked at run-time.
1368  */
1369 class SkipIfEqual {
1370  private:
1371   MacroAssembler* _masm;
1372   Label _label;
1373 
1374  public:
1375    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1376    ~SkipIfEqual();
1377 };
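
// Typical use (illustrative sketch): the code emitted inside the scope is
// skipped at run time unless the flag is true.
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // ... emitted code here executes only when DTraceMethodProbes is true
//   }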
1378 
1379 struct tableswitch {
1380   Register _reg;
1381   int _insn_index; jint _first_key; jint _last_key;
1382   Label _after;
1383   Label _branches;
1384 };
1385 
1386 #endif // CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP