/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_S390_VM_MACROASSEMBLER_S390_HPP
#define CPU_S390_VM_MACROASSEMBLER_S390_HPP

#include "asm/assembler.hpp"

#define MODERN_IFUN(name)  ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_IFUN(name) ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define MODERN_FFUN(name)  ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_FFUN(name) ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  // Move register if destination register and source register are different.
  void lr_if_needed(Register rd, Register rs);
  void lgr_if_needed(Register rd, Register rs);
  void llgfr_if_needed(Register rd, Register rs);
  void ldr_if_needed(FloatRegister rd, FloatRegister rs);
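
  // Illustrative usage (register choice is hypothetical): in a move that may
  // become a no-op after register allocation,
  //   lgr_if_needed(Z_R2, Z_R3);  // Emits LGR Z_R2,Z_R3 only if the registers differ.
  // no instruction is emitted when both operands end up in the same register.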

  void move_reg_if_needed(Register dest, BasicType dest_type, Register src, BasicType src_type);
  void move_freg_if_needed(FloatRegister dest, BasicType dest_type, FloatRegister src, BasicType src_type);

  void freg2mem_opt(FloatRegister reg,
                    int64_t       disp,
                    Register      index,
                    Register      base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register      scratch = Z_R0);
  void freg2mem_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void mem2freg_opt(FloatRegister reg,
                    int64_t       disp,
                    Register      index,
                    Register      base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register      scratch = Z_R0);
  void mem2freg_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void reg2mem_opt(Register reg,
                   int64_t  disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                   Register scratch = Z_R0);
  // Returns the offset of the store instruction.
  int reg2mem_opt(Register reg, const Address &a, bool is_double = true);

  void mem2reg_opt(Register reg,
                   int64_t  disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register));
  void mem2reg_opt(Register reg, const Address &a, bool is_double = true);
  void mem2reg_signed_opt(Register reg, const Address &a);

  // AND immediate and set condition code. Works for 64 bit immediates/operations as well.
  void and_imm(Register r, long mask, Register tmp = Z_R0, bool wide = false);

  // 1's complement, 32bit or 64bit. Optimized to exploit the distinct operands facility.
  // Note: The condition code is neither preserved nor correctly set by this code!!!
  // Note: (wide == false) does not protect the high order half of the target register
  // from alteration. It only serves as an optimization hint for 32-bit results.
  void not_(Register r1, Register r2 = noreg, bool wide = false);  // r1 = ~r2

  // Expanded support of all "rotate_then_<logicalOP>" instructions.
  //
  // Generalize and centralize rotate_then_<logicalOP> emitter.
  // Functional description. For details, see Principles of Operation, Chapter 7, "Rotate Then Insert..."
  //  - Bits  in a register are numbered left (most significant) to right (least significant), i.e. [0..63].
  //  - Bytes in a register are numbered left (most significant) to right (least significant), i.e. [0..7].
  //  - Register src is rotated to the left by (nRotate&0x3f) positions.
  //  - Negative values for nRotate result in a rotation to the right by abs(nRotate) positions.
  //  - The bits in positions [lBitPos..rBitPos] of the _ROTATED_ src operand take part in the
  //    logical operation performed on the contents (in those positions) of the dst operand.
  //  - The logical operation that is performed on the dst operand is one of
  //     o insert the selected bits (replacing the original contents of those bit positions)
  //     o and the selected bits with the corresponding bits of the dst operand
  //     o or  the selected bits with the corresponding bits of the dst operand
  //     o xor the selected bits with the corresponding bits of the dst operand
  //  - For clear_dst == true, the destination register is cleared before the bits are inserted.
  //    For clear_dst == false, only the bit positions that get data inserted from src
  //    are changed. All other bit positions remain unchanged.
  //  - For test_only == true,  the result of the logicalOP is only used to set the condition code, dst remains unchanged.
  //    For test_only == false, the result of the logicalOP replaces the selected bits of dst.
  //  - src32bit and dst32bit indicate that the respective register is used as a 32bit value only.
  //    That knowledge can simplify code generation.
  //
  // An important performance note, valid for all <logicalOP>s except "insert":
  //   Due to the complex nature of the operation, it cannot be done in a single cycle.
  //   Timing constraints require the instructions to be cracked into two micro-ops, taking
  //   one or two cycles each to execute. In some cases, an additional pipeline bubble might get added.
  //   Macroscopically, that makes up for a three- or four-cycle instruction where you would
  //   expect just a single cycle.
  //   It is thus not beneficial from a performance point of view to exploit those instructions.
  //   Other reasons (code compactness, register pressure, ...) might outweigh this penalty.
  //
  unsigned long create_mask(int lBitPos, int rBitPos);
  void rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                        int nRotate, bool src32bit, bool dst32bit, bool oneBits);
  void rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                          bool clear_dst);
  void rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
  void rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                      bool test_only);
  void rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
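
  // Illustrative example (register choice is hypothetical): extract the
  // low-order halfword of Z_R3 into a cleared Z_R2, i.e. Z_R2 = Z_R3 & 0xffff:
  //   rotate_then_insert(Z_R2, Z_R3, 48, 63, 0, true);  // Bits 48..63, no rotation, clear_dst.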

  void add64(Register r1, RegisterOrConstant inc);

  // Helper function to multiply the 64bit contents of a register by a 16bit constant.
  // The optimization tries to avoid the mghi instruction, since it uses the FPU for
  // calculation and is thus rather slow.
  //
  // There is no handling for special cases, e.g. cval==0 or cval==1.
  //
  // Returns the length of the generated code block.
  unsigned int mul_reg64_const16(Register rval, Register work, int cval);

  // Generic operation r1 := r2 + imm.
  void add2reg(Register r1, int64_t imm, Register r2 = noreg);
  // Generic operation r := b + x + d.
  void add2reg_with_index(Register r, int64_t d, Register x, Register b = noreg);
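
  // Illustrative usage (values are hypothetical): add2reg(Z_R2, 16, Z_R3)
  // computes Z_R2 = Z_R3 + 16; with r2 left at noreg, add2reg(Z_R2, 16)
  // increments Z_R2 in place.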

  // Add2mem* methods for direct memory increment.
  void add2mem_32(const Address &a, int64_t imm, Register tmp);
  void add2mem_64(const Address &a, int64_t imm, Register tmp);

  // *((int8_t*)(dst)) |= imm8
  inline void or2mem_8(Address& dst, int64_t imm8);

  // Load values by size and signedness.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);

  // Load values with large offsets to base address.
 private:
  int  split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate);
 public:
  void load_long_largeoffset(Register t, int64_t si20, Register a, Register tmp);
  void load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);
  void load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);

 private:
  long toc_distance();
 public:
  void load_toc(Register Rtoc);
  void load_long_pcrelative(Register Rdst, address dataLocation);
  static int load_long_pcrelative_size() { return 6; }
  void load_addr_pcrelative(Register Rdst, address dataLocation);
  static int load_addr_pcrel_size() { return 6; } // Just a LARL.

  // Load a value from memory and test (set CC).
  void load_and_test_byte    (Register dst, const Address &a);
  void load_and_test_short   (Register dst, const Address &a);
  void load_and_test_int     (Register dst, const Address &a);
  void load_and_test_int2long(Register dst, const Address &a);
  void load_and_test_long    (Register dst, const Address &a);

  // Test a bit in memory. Result is reflected in CC.
  void testbit(const Address &a, unsigned int bit);
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);
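
  // Illustrative sketch (register and label are hypothetical): test a single
  // bit and branch on the resulting condition code:
  //   testbit(Z_R2, 0);                             // Test one bit of Z_R2; CC reflects the result.
  //   z_brc(Assembler::bcondAllZero, L_bit_clear);  // Taken if the tested bit is 0.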

  void prefetch_read(Address a);
  void prefetch_update(Address a);

  // Clear a register, i.e. load const zero into reg. Return len (in bytes) of
  // generated instruction(s).
  //   whole_reg: Clear 64 bits if true, 32 bits otherwise.
  //   set_cc: Use instruction that sets the condition code, if true.
  int clear_reg(Register r, bool whole_reg = true, bool set_cc = true);
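
  // Illustrative usage (register choice is hypothetical):
  //   clear_reg(Z_R2);               // Zero all 64 bits; condition code is set.
  //   clear_reg(Z_R2, false, false); // Zero the 32-bit half only; CC untouched.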

#ifdef ASSERT
  int preset_reg(Register r, unsigned long pattern, int pattern_len);
#endif

  // Clear (store zeros) a small piece of memory.
  // CAUTION: Do not use this for atomic memory clearing. Use store_const() instead.
  //   addr: Address descriptor of memory to clear.
  //         Index register will not be used!
  //   size: Number of bytes to clear.
  void clear_mem(const Address& addr, unsigned size);

  // Move immediate values to memory. Currently supports 32 and 64 bit stores,
  // but may be extended to 16 bit store operation, if needed.
  // For details, see implementation in *.cpp file.
         int store_const(const Address &dest, long imm,
                         unsigned int lm, unsigned int lc,
                         Register scratch = Z_R0);
  inline int store_const(const Address &dest, long imm,
                         Register scratch = Z_R0, bool is_long = true);

  // Move/initialize arbitrarily large memory area. No check for destructive overlap.
  // Being interruptible, these instructions need a retry-loop.
  void move_long_ext(Register dst, Register src, unsigned int pad);

  void compare_long_ext(Register left, Register right, unsigned int pad);
  void compare_long_uni(Register left, Register right, unsigned int pad);

  void search_string(Register end, Register start);
  void search_string_uni(Register end, Register start);

  // Translate instructions
  // Being interruptible, these instructions need a retry-loop.
  void translate_oo(Register dst, Register src, uint mask);
  void translate_ot(Register dst, Register src, uint mask);
  void translate_to(Register dst, Register src, uint mask);
  void translate_tt(Register dst, Register src, uint mask);

  // Crypto instructions.
  // Being interruptible, these instructions need a retry-loop.
  void cksm(Register crcBuff, Register srcBuff);
  void km( Register dstBuff, Register srcBuff);
  void kmc(Register dstBuff, Register srcBuff);
  void kimd(Register srcBuff);
  void klmd(Register srcBuff);
  void kmac(Register srcBuff);

  // nop padding
  void align(int modulus);
  void align_address(int modulus);

  //
  // Constants, loading constants, TOC support
  //
  // Safepoint check factored out.
  void generate_safepoint_check(Label& slow_path, Register scratch = noreg, bool may_relocate = true);

  // Load generic address: d <- base(a) + index(a) + disp(a).
  inline void load_address(Register d, const Address &a);
  // Load absolute address (and try to optimize).
  void load_absolute_address(Register d, address addr);

  // Address of Z_ARG1 and argument_offset.
  // If temp_reg == arg_slot, arg_slot will be overwritten.
  Address argument_address(RegisterOrConstant arg_slot,
                           Register temp_reg = noreg,
                           int64_t extra_slot_offset = 0);

  // Load a narrow ptr constant (oop or klass ptr).
  void load_narrow_oop( Register t, narrowOop a);
  void load_narrow_klass(Register t, Klass* k);

  static bool is_load_const_32to64(address pos);
  static bool is_load_narrow_oop(address pos)   { return is_load_const_32to64(pos); }
  static bool is_load_narrow_klass(address pos) { return is_load_const_32to64(pos); }

  static int  load_const_32to64_size()          { return 6; }
  static int  load_narrow_oop_size()            { return load_const_32to64_size(); }
  static int  load_narrow_klass_size()          { return load_const_32to64_size(); }

  static int  patch_load_const_32to64(address pos, int64_t a);
  static int  patch_load_narrow_oop(address pos, oop o);
  static int  patch_load_narrow_klass(address pos, Klass* k);

  // cOops. CLFI exploit.
  void compare_immediate_narrow_oop(Register oop1, narrowOop oop2);
  void compare_immediate_narrow_klass(Register op1, Klass* op2);
  static bool is_compare_immediate32(address pos);
  static bool is_compare_immediate_narrow_oop(address pos);
  static bool is_compare_immediate_narrow_klass(address pos);
  static int  compare_immediate_narrow_size()       { return 6; }
  static int  compare_immediate_narrow_oop_size()   { return compare_immediate_narrow_size(); }
  static int  compare_immediate_narrow_klass_size() { return compare_immediate_narrow_size(); }
  static int  patch_compare_immediate_32(address pos, int64_t a);
  static int  patch_compare_immediate_narrow_oop(address pos, oop o);
  static int  patch_compare_immediate_narrow_klass(address pos, Klass* k);

  // Load a 32bit constant into a 64bit register.
  void load_const_32to64(Register t, int64_t x, bool sign_extend=true);
  // Load a 64 bit constant.
         void load_const(Register t, long a);
  inline void load_const(Register t, void* a);
  inline void load_const(Register t, Label& L);
  inline void load_const(Register t, const AddressLiteral& a);
  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);
  // Patch the 64 bit constant of a `load_const' sequence. This is a low level
  // procedure. It neither flushes the instruction cache nor is it atomic.
  static void patch_const(address load_const, long x);
  static int load_const_size() { return 12; }
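
  // Illustrative pairing (variable names are hypothetical): load_const emits a
  // fixed-length (load_const_size() bytes) sequence whose constant can be
  // rewritten later via patch_const:
  //   address insert_pos = pc();
  //   load_const(Z_R2, (long)0);            // Placeholder constant.
  //   ...
  //   patch_const(insert_pos, (long)value); // Low level: no icache flush, not atomic.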

  // Turn a char into boolean. NOTE: destroys r.
  void c2bool(Register r, Register t = Z_R0);

  // Optimized version of load_const for constants that do not need to be
  // loaded by a sequence of instructions of fixed length and that do not
  // need to be patched.
  int load_const_optimized_rtn_len(Register t, long x, bool emit);
  inline void load_const_optimized(Register t, long x);
  inline void load_const_optimized(Register t, void* a);
  inline void load_const_optimized(Register t, Label& L);
  inline void load_const_optimized(Register t, const AddressLiteral& a);

 public:

  //----------------------------------------------------------
  //            oops in code             -------------
  //  including compressed oops support  -------------
  //----------------------------------------------------------

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index

  // allocate_index
  AddressLiteral allocate_oop_address(jobject obj);
  // find_index
  AddressLiteral constant_oop_address(jobject obj);
  // Uses allocate_oop_address.
  inline void set_oop         (jobject obj, Register d);
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Uses constant_metadata_address.
  inline bool set_metadata_constant(Metadata* md, Register d);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);
  //
  // branch, jump
  //

  // Use one generic function for all branch patches.
  static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);

  void pd_patch_instruction(address branch, address target);

  // Extract relative address from "relative" instructions.
  static long get_pcrel_offset(unsigned long inst);
  static long get_pcrel_offset(address pc);
  static address get_target_addr_pcrel(address pc);

  static inline bool is_call_pcrelative_short(unsigned long inst);
  static inline bool is_call_pcrelative_long(unsigned long inst);
  static inline bool is_branch_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative_long(unsigned long inst);
  static inline bool is_compareandbranch_pcrelative_short(unsigned long inst);
  static inline bool is_branchoncount_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex32_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex64_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative16(unsigned long inst);
  static inline bool is_branch_pcrelative32(unsigned long inst);
  static inline bool is_branch_pcrelative(unsigned long inst);
  static inline bool is_load_pcrelative_long(unsigned long inst);
  static inline bool is_misc_pcrelative_long(unsigned long inst);
  static inline bool is_pcrelative_short(unsigned long inst);
  static inline bool is_pcrelative_long(unsigned long inst);
  // PCrelative TOC access. Variants with address argument.
  static inline bool is_load_pcrelative_long(address iLoc);
  static inline bool is_pcrelative_short(address iLoc);
  static inline bool is_pcrelative_long(address iLoc);

  static inline bool is_pcrelative_instruction(address iloc);
  static inline bool is_load_addr_pcrel(address a);

  static void patch_target_addr_pcrel(address pc, address con);
  static void patch_addr_pcrel(address pc, address con) {
    patch_target_addr_pcrel(pc, con); // Just delegate. This is only for nativeInst_s390.cpp.
  }

  //---------------------------------------------------------
  //  Some macros for more comfortable assembler programming.
  //---------------------------------------------------------

  // NOTE: pass NearLabel T to signal that the branch target T will be bound to a near address.

  void compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);

  void branch_optimized(Assembler::branch_condition cond, address branch_target);
  void branch_optimized(Assembler::branch_condition cond, Label&  branch_target);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    address  branch_addr,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    jlong    x2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);
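
  // Illustrative sketch (registers and label are hypothetical): branch to
  // L_done if the signed 64-bit contents of Z_R2 and Z_R3 are equal:
  //   compare64_and_branch(Z_R2, Z_R3, Assembler::bcondEqual, L_done);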

  //
  // Support for frame handling
  //
  // Specify the register that should be stored as the return pc in the
  // current frame (default is R14).
  inline void save_return_pc(Register pc = Z_R14);
  inline void restore_return_pc();

  // Get current PC.
  address get_PC(Register result);

  // Get current PC + offset. Offset given in bytes, must be even!
  address get_PC(Register result, int64_t offset);

  // Accessing, and in particular modifying, a stack location is only safe if
  // the stack pointer (Z_SP) is set such that the accessed stack location is
  // in the reserved range.
  //
  // From a performance point of view, it is desirable not to change the SP
  // first and then immediately use it to access the freshly reserved space.
  // That opens a small gap, though. If, just after storing some value (the
  // frame pointer) into the to-be-reserved space, an interrupt is caught,
  // the handler might use the space beyond Z_SP for its own purposes.
  // If that happens, the stored value might get altered.

  // Resize the current frame, either relative to the current SP or to an absolute address.
  void resize_frame_sub(Register offset, Register fp, bool load_fp=true);
  void resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp);
  void resize_frame_absolute(Register addr, Register fp, bool load_fp);
  void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp=true);

  // Push a frame of size bytes. If copy_sp is false, old_sp must already
  // contain a copy of Z_SP.
  void push_frame(Register bytes, Register old_sp, bool copy_sp = true, bool bytes_with_inverted_sign = false);

  // Push a frame of size `bytes'. No abi space is provided.
  // Don't rely on register locking, instead pass a scratch register
  // (Z_R0 by default).
  // CAUTION! Passing registers >= Z_R2 may produce bad results on
  // old CPUs!
  unsigned int push_frame(unsigned int bytes, Register scratch = Z_R0);

  // Push a frame of size `bytes' with abi160 on top.
  unsigned int push_frame_abi160(unsigned int bytes);

  // Pop the current C frame.
  void pop_frame();
  // Pop the current C frame and restore the return PC register (Z_R14).
  void pop_frame_restore_retPC(int frame_size_in_bytes);
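
  // Illustrative pairing (frame size is hypothetical): stubs that need scratch
  // stack space bracket their body with a push/pop pair:
  //   unsigned int frame_size = push_frame_abi160(32); // abi160 + 32 payload bytes.
  //   ...                                              // Body uses the new frame.
  //   pop_frame();                                     // Restores the caller's Z_SP.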

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

 public:
  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);
  void call_VM_leaf_base(address entry_point, bool allow_relocation);

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is setup
  // correctly. Call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).

  // If no last_java_sp is specified (noreg) then SP will be used instead.

  virtual void call_VM_base(
    Register        oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register        last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address         entry_point,       // The entry point.
    bool            check_exception);  // Flag which indicates if exception should be checked.
  virtual void call_VM_base(
    Register        oop_result,       // Where an oop-result ends up if any; use noreg otherwise.
    Register        last_java_sp,     // To set up last_Java_frame in stubs; use noreg otherwise.
    address         entry_point,      // The entry point.
    bool            allow_relocation, // Flag to request generation of relocatable code.
    bool            check_exception); // Flag which indicates if exception should be checked.

  // Call into the VM.
  // Passes the thread pointer (in Z_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
               Register arg_3, bool check_exceptions = true);

  void call_VM_static(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                      Register arg_3, bool check_exceptions = true);

  // Overloaded with last_java_sp.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
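
  // Illustrative sketch (entry point and arguments are hypothetical): leaf
  // calls skip the last_Java_frame bookkeeping and pass arguments in registers:
  //   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
  //                Z_thread, Z_R2 /*method*/);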

  // Really static VM leaf call (never patched).
  void call_VM_leaf_static(address entry_point);
  void call_VM_leaf_static(address entry_point, Register arg_1);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a C function via its function entry. Updates and returns _last_calls_return_pc.
  inline address call(Register function_entry);
  inline address call_c(Register function_entry);
         address call_c(address function_entry);
  // Variant for really static (non-relocatable) calls which are never patched.
         address call_c_static(address function_entry);
  // TOC or pc-relative call + emits a runtime_call relocation.
         address call_c_opt(address function_entry);

  inline address call_stub(Register function_entry);
  inline address call_stub(address  function_entry);

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

 private:
  static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
  static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.


 public:
  bool           call_far_patchable(address target, int64_t toc_offset);
  static bool    is_call_far_patchable_at(address inst_start);             // All supported forms of patchable calls.
  static bool    is_call_far_patchable_pcrelative_at(address inst_start);  // Pc-relative call with leading nops.
  static bool    is_call_far_pcrelative(address instruction_addr);         // Pure far pc-relative call, with one leading size adjustment nop.
  static void    set_dest_of_call_far_patchable_at(address inst_start, address target, int64_t toc_offset);
  static address get_dest_of_call_far_patchable_at(address inst_start, address toc_start);

  void align_call_far_patchable(address pc);

  // PCrelative TOC access.

  // This value is independent of code position - constant for the lifetime of the VM.
  static int call_far_patchable_size() {
    return load_const_from_toc_size() + call_byregister_size();
  }

  static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }

  static bool call_far_patchable_requires_alignment_nop(address pc) {
    if (!os::is_MP()) return false;
    int size = call_far_patchable_size();
    return ((intptr_t)(pc + size) & 0x03L) != 0;
  }

  // END OF PCrelative TOC access.

  static int jump_byregister_size()          { return 2; }
  static int jump_pcrelative_size()          { return 4; }
  static int jump_far_pcrelative_size()      { return 6; }
  static int call_byregister_size()          { return 2; }
  static int call_pcrelative_size()          { return 4; }
  static int call_far_pcrelative_size()      { return 2 + 6; } // Prepend each BRASL with a nop.
  static int call_far_pcrelative_size_raw()  { return 6; }     // Size of the BRASL alone, without the leading nop.

  //
  // Java utilities
  //

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Polling page support.
  enum poll_mask {
    mask_stackbang = 0xde, // 222 (dec)
    mask_safepoint = 0x6f, // 111 (dec)
    mask_profiling = 0xba  // 186 (dec)
  };

  // Read from the polling page.
  void load_from_polling_page(Register polling_page_address, int64_t offset = 0);

  // Check if given instruction is a read from the polling page
  // as emitted by load_from_polling_page.
  static bool is_load_from_polling_page(address instr_loc);
  // Extract poll address from instruction and ucontext.
  static address get_poll_address(address instr_loc, void* ucontext);
  // Extract poll register from instruction.
  static uint get_poll_register(address instr_loc);

  // Check if instruction is a write access to the memory serialization page
  // realized by one of the instructions stw, stwu, stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for serializing memory accesses between threads.
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // Check for reserved stack access in method being exited. If the reserved
  // stack area was accessed, protect it again and throw StackOverflowError.
  // Uses Z_R1.
  void reserved_stack_check(Register return_pc);

  // Atomics
  // -- none?

  void tlab_allocate(Register obj,                // Result: pointer to object after successful allocation
                     Register var_size_in_bytes,  // Object size in bytes if unknown at compile time; invalid otherwise.
                     int      con_size_in_bytes,  // Object size in bytes if   known at compile time.
                     Register t1,                 // temp register
                     Label&   slow_case);         // Continuation point if fast allocation fails.

  // Emitter for interface method lookup.
  //   input: recv_klass, intf_klass, itable_index
  //   output: method_result
  //   kills: itable_index, temp1_reg, Z_R0, Z_R1
  void lookup_interface_method(Register           recv_klass,
                               Register           intf_klass,
                               RegisterOrConstant itable_index,
                               Register           method_result,
                               Register           temp1_reg,
                               Register           temp2_reg,
                               Label&             no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register             recv_klass,
                             RegisterOrConstant   vtable_index,
                             Register             method_result);

  // Factor out code to call ic_miss_handler.
  unsigned int call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch);
  void nmethod_UEP(Label& ic_miss);

  // Emitters for "partial subtype" checks.

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp1_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Label*   L_success,
                                     Label*   L_failure,
                                     Label*   L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register Rsubklass,
                                     Register Rsuperklass,
                                     Register Rarray_ptr, // tmp
                                     Register Rlength,    // tmp
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label&   L_success);
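
  // Illustrative usage (registers and label are hypothetical): branch to L_ok
  // if the klass in Z_R2 is a subtype of the klass in Z_R3, fall through
  // otherwise:
  //   check_klass_subtype(Z_R2, Z_R3, Z_R4 /*temp1*/, Z_R5 /*temp2*/, L_ok);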

  // Increment a counter at counter_address when the eq condition code is set.
  // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
  void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
  void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);

  // Write to card table for modification at store_addr - register is destroyed afterwards.
  void card_write_barrier_post(Register store_addr, Register tmp);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  // Purpose: record the previous value if it is not null.
  // All non-tmps are preserved.
  void g1_write_barrier_pre(Register           Robj,
                            RegisterOrConstant offset,
                            Register           Rpre_val,        // Ideally, this is a non-volatile register.
                            Register           Rval,            // Will be preserved.
                            Register           Rtmp1,           // If Rpre_val is volatile, either Rtmp1
                            Register           Rtmp2,           // or Rtmp2 has to be non-volatile.
                            bool               pre_val_needed); // Save Rpre_val across runtime call, caller uses it.

  // General G1 post-barrier generator.
  // Purpose: Store cross-region card.
  void g1_write_barrier_post(Register Rstore_addr,
                             Register Rnew_val,
                             Register Rtmp1,
                             Register Rtmp2,
                             Register Rtmp3);
#endif // INCLUDE_ALL_GCS

  // Support for last Java frame (but use call_VM instead where possible).
 private:
  void set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation);
  void reset_last_Java_frame(bool allow_relocation);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation);
 public:
  inline void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  inline void set_last_Java_frame_static(Register last_java_sp, Register last_Java_pc);
  inline void reset_last_Java_frame(void);
  inline void reset_last_Java_frame_static(void);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1);

  void set_thread_state(JavaThreadState new_state);

  // Read vm result from thread.
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register result);

  // The vm result is currently hijacked for oop preservation.
  void set_vm_result(Register oop_result);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
  //
  // %%%%%% Currently not done for z/Architecture

  void null_check(Register reg, Register tmp = Z_R0, int64_t offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);  // Implemented in shared file ?!

  // Klass oop manipulations if compressed.
  void encode_klass_not_null(Register dst, Register src = noreg);
  void decode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst);
  void load_klass(Register klass, Address mem);
  void load_klass(Register klass, Register src_oop);
  void load_prototype_header(Register Rheader, Register Rsrc_oop);
  void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
  void store_klass_gap(Register s, Register dst_oop);

  // This function calculates the size of the code generated by
  //   decode_klass_not_null(register dst)
  // when (Universe::heap() != NULL). Hence, if the instructions
  // it generates change, then this method needs to be updated.
  static int instr_size_for_decode_klass_not_null();

  void encode_heap_oop(Register oop);
  void encode_heap_oop_not_null(Register oop);

  static int get_oop_base_pow2_offset(uint64_t oop_base);
  int  get_oop_base(Register Rbase, uint64_t oop_base);
  int  get_oop_base_complement(Register Rbase, uint64_t oop_base);
  void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
  void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);
  void load_heap_oop(Register dest, const Address &a);
  void load_heap_oop(Register d, int64_t si16, Register s1);
  void load_heap_oop_not_null(Register d, int64_t si16, Register s1);
  void store_heap_oop(Register Roop, RegisterOrConstant offset, Register base);
  void store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base);
  void store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base);
  void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
  void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1);

  void resolve_oop_handle(Register result);
  void load_mirror(Register mirror, Register method);

  //--------------------------
  //---  Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len);
  unsigned int Clear_Array_Const(long cnt, Register base);
  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len);
  unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                             Register cnt_reg,
                                             Register tmp1_reg, Register tmp2_reg);

  //-------------------------------------------
  // Special String Intrinsics Implementation.
  //-------------------------------------------
  // Intrinsics for CompactStrings
  //   Restores: src, dst
  //   Uses:     cnt
  //   Kills:    tmp, Z_R0, Z_R1.
  //   Early clobber: result.
  //   The boolean precise controls the accuracy of the result value.
  unsigned int string_compress(Register result, Register src, Register dst, Register cnt,
                               Register tmp,    bool precise);

  // Inflate byte[] to char[].
  unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);

  // Inflate byte[] to char[].
  //   Restores: src, dst
  //   Uses:     cnt
  //   Kills:    tmp, Z_R0, Z_R1.
  unsigned int string_inflate(Register src, Register dst, Register cnt, Register tmp);

  // Inflate byte[] to char[], length known at compile time.
  //   Restores: src, dst
  //   Kills:    tmp, Z_R0, Z_R1.
  // Note:
  //   len is a signed int. It counts # characters, not bytes.
  unsigned int string_inflate_const(Register src, Register dst, Register tmp, int len);

  // Kills src.
  unsigned int has_negatives(Register result, Register src, Register cnt,
                             Register odd_reg, Register even_reg, Register tmp);

  unsigned int string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
                              Register odd_reg, Register even_reg, Register result, int ae);

  unsigned int array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
                            Register odd_reg, Register even_reg, Register result, bool is_byte);

  unsigned int string_indexof(Register result, Register haystack, Register haycnt,
                              Register needle, Register needlecnt, int needlecntval,
                              Register odd_reg, Register even_reg, int ae);

  unsigned int string_indexof_char(Register result, Register haystack, Register haycnt,
                                   Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte);

  // Emit an oop const to the constant pool and set a relocation info
  // with address current_pc. Return the TOC offset of the constant.
  int store_const_in_toc(AddressLiteral& val);
  int store_oop_in_toc(AddressLiteral& oop);
  // Emit an oop const to the constant pool via store_oop_in_toc, or
  // emit a scalar const to the constant pool via store_const_in_toc,
  // and load the constant into register dst.
  bool load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  // Get CPU version dependent size of load_const sequence.
  // The returned value is valid only for code sequences
  // generated by load_const, not load_const_optimized.
  static int load_const_from_toc_size() {
    return load_long_pcrelative_size();
  }
  bool load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  static intptr_t get_const_from_toc(address pc);
  static void     set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb);

  // Dynamic TOC.
  static bool is_load_const(address a);
  static bool is_load_const_from_toc_pcrelative(address a);
  static bool is_load_const_from_toc(address a) { return is_load_const_from_toc_pcrelative(a); }

  // PCrelative TOC access.
  static bool is_call_byregister(address a) { return is_z_basr(*(short*)a); }
  static bool is_load_const_from_toc_call(address a);
  static bool is_load_const_call(address a);
  static int load_const_call_size() { return load_const_size() + call_byregister_size(); }
  static int load_const_from_toc_call_size() { return load_const_from_toc_size() + call_byregister_size(); }
  // Offset is +/- 2**32 -> use long.
  static long get_load_const_from_toc_offset(address a);


  void generate_type_profiling(const Register Rdata,
                               const Register Rreceiver_klass,
                               const Register Rwanted_receiver_klass,
                               const Register Rmatching_row,
                               bool is_virtual_call);

  // Bit operations for single register operands.
  inline void lshift(Register r, int places, bool doubl = true);   // <<
  inline void rshift(Register r, int places, bool doubl = true);   // >>

  //
  // Debugging
  //

  // Assert on CC (condition code in CPU state).
  void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_low(const char *msg, int id) PRODUCT_RETURN;
  void asm_assert_high(const char *msg, int id) PRODUCT_RETURN;
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

  void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN;

 private:
  // Emit assertions.
  void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                            Register mem_base, const char* msg, int id) PRODUCT_RETURN;

 public:
  inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id);
  }

  inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN;

  // Verify Z_thread contents.
  void verify_thread();

  // Only if +VerifyOops.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify_method and klass metadata (compare against vptr?).
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:
  // Generate printout in stop().
  static const char* stop_types[];
  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  // Prints msg and stops execution.
  void    stop(int type, const char* msg, int id = 0);
  address stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation); // Non-relocatable code only!!
  void    stop_static(int type, const char* msg, int id);                                        // Non-relocatable code only!!

 public:

  // Prints msg and stops.
  address stop_chain(      address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, true); }
  address stop_chain_static(address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, false); }
  void stop_static  (const char* msg = "", int id = 0) { stop_static(stop_stop,   msg, id); }
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,          msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,      msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
  void should_not_reach_here(const char* msg = "", int id = -1) { stop(stop_shouldnotreachhere, msg, id); }

  // Factor out part of stop into subroutine to save space.
  void stop_subroutine();

  // Prints msg, but doesn't stop.
  void warn(const char* msg);

  //-----------------------------
  //---  basic block tracing code
  //-----------------------------
  void trace_basic_block(uint i);
  void init_basic_block_trace();
  // Number of bytes a basic block gets larger due to the tracing code macro (worst case).
  // Currently, the worst case is 48 bytes. 64 puts us securely on the safe side.
  static int basic_blck_trace_blk_size_incr() { return 64; }

  // Write pattern 0x0101010101010101 in region [low-before, high+after].
  // Low and high may be the same registers. Before and after are
  // the numbers of 8-byte words.
  void zap_from_to(Register low, Register high, Register tmp1 = Z_R0, Register tmp2 = Z_R1,
                   int before = 0, int after = 0) PRODUCT_RETURN;

  // Emitters for CRC32 calculation.
  // A note on invertCRC:
  //   Unfortunately, the internal representation of the crc differs between CRC32 and CRC32C.
  //   CRC32 holds its current crc value in the externally visible representation.
  //   CRC32C holds its current crc value in internal format, ready for updating.
  //   Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
  //   In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
  //   The bool invertCRC parameter indicates whether bit-flipping is required before updates.
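  //
  // Illustrative call (register assignment is hypothetical): a CRC32 (not
  // CRC32C) update keeps its value in external representation, so it is
  // bit-flipped around the update:
  //   kernel_crc32_singleByteReg(Z_R2 /*crc*/, Z_R3 /*val*/, Z_R4 /*table*/,
  //                              true /*invertCRC*/);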
 private:
  void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byte_crc32( Register crc, Register val, Register table);
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0,  Register t1,  Register t2,  Register t3);
 public:
  void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
                                  bool invertCRC);
  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
                               bool invertCRC);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          bool invertCRC);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          bool invertCRC);
  void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          bool invertCRC);

  // Emitters for BigInteger.multiplyToLen intrinsic.
  // Note: the length of the result array (zlen) is passed on the stack.
 private:
  void add2_with_carry(Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart,
                             Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_loop(Register x_xstart,
                               Register y, Register z,
                               Register yz_idx, Register idx,
                               Register jdx,
                               Register carry, Register product,
                               Register carry2);
 public:
  void multiply_to_len(Register x, Register xlen,
                       Register y, Register ylen,
                       Register z,
                       Register tmp1, Register tmp2,
                       Register tmp3, Register tmp4, Register tmp5);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch);
  ~SkipIfEqual();
};
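
// Illustrative RAII usage (flag and scratch register are hypothetical):
//   {
//     SkipIfEqual skip(_masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // Code emitted here is jumped over when DTraceMethodProbes == false.
//   }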

#ifdef ASSERT
// Return false (e.g. important for our impl. of virtual calls).
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

#endif // CPU_S390_VM_MACROASSEMBLER_S390_HPP