/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_64_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_64_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes to be a single native instruction
// as a sequence of instructions.
//
// The 'Raw' variants are the low-level initial code (usually one
// instruction wide, though some of them are already composed
// sequences). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry points, hiding potential
// back-end extensions and the actual instruction size.
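//
// For example (illustrative only): generic runtime code walks code with the
// front-end classes,
//
//   NativeInstruction* ni = nativeInstruction_at(pc);  // pc: some address in the code cache
//   if (ni->is_call()) { ... }
//
// while back-end code that knows the exact sequence it emitted can step
// through it with the raw variants,
//
//   RawNativeInstruction* raw = rawNativeInstruction_at(pc);
//   raw = raw->next_raw();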
class NativeInstruction;

class RawNativeInstruction {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize,
    instruction_size_in_bits = instruction_size * BitsPerByte,
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  static const int zombie_illegal_instruction = 0xd4000542; // hvc #42
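  // (Encoding check: hvc has base opcode 0xd4000002 with imm16 in bits [20:5];
  //  with imm16 == 42 this gives 0xd4000002 | (42 << 5) == 0xd4000542.)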

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }

  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

  int encoding() const {
    return *(int*)this;
  }

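  // Rewrites the instruction word in place, invalidating the ICache for that
  // word; the write and flush are skipped when the encoding is unchanged.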
  void set_encoding(int value) {
    int old = encoding();
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

  bool is_nop()                      const { return encoding() == (int)0xd503201f; }
  bool is_b()                        const { return (encoding() & 0xfc000000) == 0x14000000; } // unconditional branch
  bool is_b_cond()                   const { return (encoding() & 0xff000010) == 0x54000000; } // conditional branch
  bool is_bl()                       const { return (encoding() & 0xfc000000) == 0x94000000; }
  bool is_br()                       const { return (encoding() & 0xfffffc1f) == 0xd61f0000; }
  bool is_blr()                      const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
  bool is_ldr_literal()              const { return (encoding() & 0xff000000) == 0x58000000; }
  bool is_adr_aligned()              const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
  bool is_adr_aligned_lr()           const { return (encoding() & 0xff00001f) == 0x1000001e; } // adr LR, <label>, where label is aligned to 4 bytes (address of instruction).

  bool is_ldr_str_gp_reg_unsigned_imm()   const { return (encoding() & 0x3f000000) == 0x39000000; } // ldr/str{b, sb, h, sh, _w, sw} Rt, [Rn, #imm]
  bool is_ldr_str_fp_reg_unsigned_imm()   const { return (encoding() & 0x3f000000) == 0x3D000000; } // ldr/str Rt(SIMD), [Rn, #imm]
  bool is_ldr_str_reg_unsigned_imm()      const { return is_ldr_str_gp_reg_unsigned_imm() || is_ldr_str_fp_reg_unsigned_imm(); }

  bool is_stp_preindex()             const { return (encoding() & 0xffc00000) == 0xa9800000; } // stp Xt1, Xt2, [Xn, #imm]!
  bool is_ldp_postindex()            const { return (encoding() & 0xffc00000) == 0xa8c00000; } // ldp Xt1, Xt2, [Xn], #imm
  bool is_mov_sp()                   const { return (encoding() & 0xfffffc00) == 0x91000000; } // mov <Xn|SP>, <Xm|SP>
  bool is_movn()                     const { return (encoding() & 0x7f800000) == 0x12800000; }
  bool is_movz()                     const { return (encoding() & 0x7f800000) == 0x52800000; }
  bool is_movk()                     const { return (encoding() & 0x7f800000) == 0x72800000; }
  bool is_orr_imm()                  const { return (encoding() & 0x7f800000) == 0x32000000; }
  bool is_cmp_rr()                   const { return (encoding() & 0x7fe00000) == 0x6b000000; }
  bool is_csel()                     const { return (encoding() & 0x7fe00000) == 0x1a800000; }
  bool is_sub_shift()                const { return (encoding() & 0x7f200000) == 0x4b000000; } // sub Rd, Rn, shift (Rm, imm)
  bool is_mov()                      const { return (encoding() & 0x7fe0ffe0) == 0x2a0003e0; } // mov Rd, Rm (orr Rd, ZR, shift (Rm, 0))
  bool is_tst()                      const { return (encoding() & 0x7f20001f) == 0x6a00001f; } // tst Rn, shift (Rm, imm) (ands ZR, Rn, shift(Rm, imm))
  bool is_lsr_imm()                  const { return (encoding() & 0x7f807c00) == 0x53007c00; } // lsr Rd, Rn, imm (ubfm Rd, Rn, imm, 31/63)

  bool is_far_jump()                 const { return is_ldr_literal() && next_raw()->is_br(); }
  bool is_fat_call()                 const {
    return
#ifdef COMPILER2
      (is_blr() && next_raw()->is_b()) ||
#endif
      (is_adr_aligned_lr() && next_raw()->is_br());
  }
  bool is_far_call()                 const {
    return is_ldr_literal() && next_raw()->is_fat_call();
  }

  bool is_ic_near_call()             const { return is_adr_aligned_lr() && next_raw()->is_b(); }
  bool is_ic_far_call()              const { return is_adr_aligned_lr() && next_raw()->is_ldr_literal() && next_raw()->next_raw()->is_br(); }
  bool is_ic_call()                  const { return is_ic_near_call() || is_ic_far_call(); }

  bool is_jump()                     const { return is_b() || is_far_jump(); }
  bool is_call()                     const { return is_bl() || is_far_call() || is_ic_call(); }
  bool is_branch()                   const { return is_b() || is_bl(); }

  // c2 doesn't use fixed registers for safepoint poll address
  bool is_safepoint_poll() const {
    return true;
  }

  bool is_save_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_stp_preindex()) return false; current = current->next_raw();
    for (int i = 28; i >= 0; i -= 2) {
      if (!current->is_stp_preindex()) return false; current = current->next_raw();
    }

    if (!current->is_adr_aligned())                 return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  bool is_restore_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    for (int i = 0; i <= 28; i += 2) {
      if (!current->is_ldp_postindex()) return false; current = current->next_raw();
    }
    if (!current->is_ldp_postindex()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

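  // Skips a literal bound directly into the instruction stream: an optional
  // alignment nop followed by the two instruction slots holding the 64-bit
  // literal itself.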
  const RawNativeInstruction* skip_bind_literal() const {
    const RawNativeInstruction* current = this;
    if (((uintptr_t)current) % wordSize != 0) {
      assert(current->is_nop(), "should be");
      current = current->next_raw();
    }
    assert(((uintptr_t)current) % wordSize == 0, "should be"); // bound literal should be aligned
    current = current->next_raw()->next_raw();
    return current;
  }

  bool is_stop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_save_all_registers(&current)) return false;
    if (!current->is_ldr_literal())                return false; current = current->next_raw();
    if (!current->is_mov_sp())                     return false; current = current->next_raw();
    if (!current->is_ldr_literal())                return false; current = current->next_raw();
    if (!current->is_br())                         return false; current = current->next_raw();

    current = current->skip_bind_literal();
    current = current->skip_bind_literal();

    *next = (RawNativeInstruction*) current;
    return true;
  }

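  // A "mov_slow" constant load is either an orr with an immediate, or an
  // initial movn/movz followed by up to three movk instructions, e.g.
  // (illustrative):
  //   movz x8, #0xbeef
  //   movk x8, #0xdead, lsl #16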
  bool is_mov_slow(const RawNativeInstruction** next = NULL) const {
    const RawNativeInstruction* current = this;

    if (current->is_orr_imm()) {
      current = current->next_raw();

    } else if (current->is_movn() || current->is_movz()) {
      current = current->next_raw();
      int movkCount = 0;
      while (current->is_movk()) {
        movkCount++;
        if (movkCount > 3) return false;
        current = current->next_raw();
      }

    } else {
      return false;
    }

    if (next != NULL) {
      *next = (RawNativeInstruction*)current;
    }
    return true;
  }

#ifdef ASSERT
  void skip_verify_heapbase(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (CheckCompressedOops) {
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
      if (!current->is_stp_preindex())      return; current = current->next_raw();
      // NOTE: temporary workaround, remove with m6-01?
      // skip saving condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_mov_slow(&current))  return;
      if (!current->is_cmp_rr())            return; current = current->next_raw();
      if (!current->is_b_cond())            return; current = current->next_raw();
      if (!current->is_stop(&current))      return;

#ifdef COMPILER2
      if (current->is_nop()) current = current->next_raw();
#endif
      // NOTE: temporary workaround, remove with m6-01?
      // skip restoring condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_ldp_postindex())     return; current = current->next_raw();
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
    }

    *next = (RawNativeInstruction*) current;
  }
#endif // ASSERT

  bool is_ldr_global_ptr(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_mov_slow(&current))            return false;
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  void skip_verify_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (VerifyOops) {
      if (!current->is_save_all_registers(&current)) return;

      if (current->is_mov()) {
        current = current->next_raw();
      }

      if (!current->is_mov_sp())                        return; current = current->next_raw();
      if (!current->is_ldr_literal())                   return; current = current->next_raw();
      if (!current->is_ldr_global_ptr(&current))        return;
      if (!current->is_blr())                           return; current = current->next_raw();
      if (!current->is_restore_all_registers(&current)) return;
      if (!current->is_b())                             return; current = current->next_raw();

      current = current->skip_bind_literal();
    }

    *next = (RawNativeInstruction*) current;
  }

  void skip_encode_heap_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
    current->skip_verify_heapbase(&current);
#endif // ASSERT
    current->skip_verify_oop(&current);

    if (Universe::narrow_oop_base() == NULL) {
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm()) return; current = current->next_raw();
      } else {
        if (current->is_mov()) {
          current = current->next_raw();
        }
      }
    } else {
      if (!current->is_tst())       return; current = current->next_raw();
      if (!current->is_csel())      return; current = current->next_raw();
      if (!current->is_sub_shift()) return; current = current->next_raw();
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm())  return; current = current->next_raw();
      }
    }

    *next = (RawNativeInstruction*) current;
  }

  void verify();

  // For unit tests
  static void test() {}

 private:

  void check_bits_range(int bits, int scale, int low_bit) const {
    assert((0 <= low_bit) && (0 < bits) && (low_bit + bits <= instruction_size_in_bits), "invalid bits range");
    assert((0 <= scale) && (scale <= 4), "scale is out of range");
  }

  void set_imm(int imm_encoding, int bits, int low_bit) {
    int imm_mask = right_n_bits(bits) << low_bit;
    assert((imm_encoding & ~imm_mask) == 0, "invalid imm encoding");
    set_encoding((encoding() & ~imm_mask) | imm_encoding);
  }

 protected:

  // Returns signed immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
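  // For example (illustrative), an ldr (literal) offset uses bits == 19, scale == 2 and
  // low_bit == 5, so high_bits_to_clean == 8: the left shift drops the bits above the
  // field, the arithmetic right shift sign-extends the field down to bit 0, and the
  // final shift by scale converts the word offset to a byte offset.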
  int get_signed_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    int high_bits_to_clean = (instruction_size_in_bits - (low_bit + bits));
    return encoding() << high_bits_to_clean >> (high_bits_to_clean + low_bit) << scale;
  }

  // Puts given signed immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_signed_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  // Returns unsigned immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
  int get_unsigned_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    return ((encoding() >> low_bit) & right_n_bits(bits)) << scale;
  }

  // Puts given unsigned immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_unsigned_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_unsigned_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  int get_signed_offset(int bits, int low_bit) const {
    return get_signed_imm(bits, 2, low_bit);
  }

  void set_signed_offset(int offset, int bits, int low_bit) {
    set_signed_imm(offset, bits, 2, low_bit);
  }
};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  RawNativeInstruction* instr = RawNativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------

// Load/store register (unsigned scaled immediate)
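// For example (illustrative), "ldr x0, [x1, #16]" has size bits 0b11, so
// get_offset_scale() is 3 and imm12 is 2; offset() then returns 2 << 3 == 16.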
class NativeMovRegMem: public RawNativeInstruction {
 private:
  int get_offset_scale() const {
    return get_unsigned_imm(2, 0, 30);
  }

 public:
  int offset() const {
    return get_unsigned_imm(12, get_offset_scale(), 10);
  }

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  const RawNativeInstruction* instr = rawNativeInstruction_at(address);

#ifdef COMPILER1
    // NOP required for C1 patching
    if (instr->is_nop()) {
      instr = instr->next_raw();
    }
#endif

  instr->skip_encode_heap_oop(&instr);

  assert(instr->is_ldr_str_reg_unsigned_imm(), "must be");
  return (NativeMovRegMem*)instr;
}

// -------------------------------------------------------------------

class NativeInstruction : public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined to avoid confusion.
  //
  // The front-end and most classes, except for those defined in nativeInst_arm
  // or relocInfo_arm, should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* instr = NativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------
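// ldr (literal): the signed 19-bit word offset keeps the literal within
// roughly +/-1 MB of the instruction address.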
class NativeInstructionLdrLiteral: public NativeInstruction {
 public:
  address literal_address() {
    address la = instruction_address() + get_signed_offset(19, 5);
    assert(la != instruction_address(), "literal points to instruction");
    return la;
  }

  address after_literal_address() {
    return literal_address() + wordSize;
  }

  void set_literal_address(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    int opc = (encoding() >> 30) & 0x3;
    assert (opc != 0b01 || addr == pc || ((uintx)addr & 7) == 0, "ldr target should be aligned");
    set_signed_offset(addr - pc, 19, 5);
  }

  void set_literal_address(address addr) {
    set_literal_address(addr, instruction_address());
  }

  address literal_value() {
    return *(address*)literal_address();
  }

  void set_literal_value(address dest) {
    *(address*)literal_address() = dest;
  }
};

inline NativeInstructionLdrLiteral* nativeLdrLiteral_at(address address) {
  assert(nativeInstruction_at(address)->is_ldr_literal(), "must be");
  return (NativeInstructionLdrLiteral*)address;
}

// -------------------------------------------------------------------
// Common class for branch instructions with 26-bit immediate offset: B (unconditional) and BL
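// The signed 26-bit word offset gives these branches a reach of roughly
// +/-128 MB from the instruction address.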
class NativeInstructionBranchImm26: public NativeInstruction {
 public:
  address destination(int adj = 0) const {
    return instruction_address() + get_signed_offset(26, 0) + adj;
  }

  void set_destination(address dest) {
    intptr_t offset = (intptr_t)(dest - instruction_address());
    assert((offset & 0x3) == 0, "should be aligned");
    set_signed_offset(offset, 26, 0);
  }
};

inline NativeInstructionBranchImm26* nativeB_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

inline NativeInstructionBranchImm26* nativeBL_at(address address) {
  assert(nativeInstruction_at(address)->is_bl(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

// -------------------------------------------------------------------
class NativeInstructionAdrLR: public NativeInstruction {
 public:
  // Returns address which is loaded into LR by this instruction.
  address target_lr_value() {
    return instruction_address() + get_signed_offset(19, 5);
  }
};

inline NativeInstructionAdrLR* nativeAdrLR_at(address address) {
  assert(nativeInstruction_at(address)->is_adr_aligned_lr(), "must be");
  return (NativeInstructionAdrLR*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
 public:

  address return_address() const {
    if (is_bl()) {
      return next_raw_instruction_address();

    } else if (is_far_call()) {
#ifdef COMPILER2
      if (next_raw()->is_blr()) {
        // ldr_literal; blr; ret_addr: b skip_literal;
        return addr_at(2 * instruction_size);
      }
#endif
      assert(next_raw()->is_adr_aligned_lr() && next_raw()->next_raw()->is_br(), "must be");
      return nativeLdrLiteral_at(instruction_address())->after_literal_address();

    } else if (is_ic_call()) {
      return nativeAdrLR_at(instruction_address())->target_lr_value();

    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return nativeBL_at(instruction_address())->destination(adj);

    } else if (is_far_call()) {
      return nativeLdrLiteral_at(instruction_address())->literal_value();

    } else if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        return nativeB_at(next->instruction_address())->destination(adj);
      } else if (next->is_far_jump()) {
        // ic_far_call
        return nativeLdrLiteral_at(next->instruction_address())->literal_value();
      }
    }
    ShouldNotReachHere();
    return NULL;
  }

  void set_destination(address dest) {
    if (is_bl()) {
      nativeBL_at(instruction_address())->set_destination(dest);
      return;
    }
    if (is_far_call()) {
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
      OrderAccess::storeload(); // overkill if caller holds lock?
      return;
    }
    if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        nativeB_at(next->instruction_address())->set_destination(dest);
        return;
      }
      if (next->is_far_jump()) {
        // ic_far_call
        nativeLdrLiteral_at(next->instruction_address())->set_literal_value(dest);
        OrderAccess::storeload(); // overkill if caller holds lock?
        return;
      }
    }
    ShouldNotReachHere();
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "call target should be from code cache (required by ic_call and patchable_call)");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call(), "should be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }
};

inline RawNativeCall* rawNativeCall_at(address address) {
  RawNativeCall * call = (RawNativeCall*)address;
  call->verify();
  return call;
}

class NativeCall: public RawNativeCall {
 public:

  // NativeCall::next_instruction_address() is used only to define the
  // range in which to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated with the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  static bool is_call_before(address return_address);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall * call = (NativeCall*)address;
  call->verify();
  return call;
}

NativeCall* nativeCall_before(address return_address);

// -------------------------------------------------------------------
class NativeGeneralJump: public NativeInstruction {
 public:

  address jump_destination() const {
    return nativeB_at(instruction_address())->destination();
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);

  static void insert_unconditional(address code_pos, address entry);

};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    if (is_b()) {
      address a = nativeB_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
      return a;
    } else {
      assert(is_far_jump(), "should be");
      return nativeLdrLiteral_at(instruction_address())->literal_value();
    }
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      nativeB_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_far_jump(), "should be");
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
    }
  }
};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
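// NativeMovConstReg is expected to be either an ldr from a bound literal
// (possibly preceded by a nop used for C1 patching) or a mov_slow sequence;
// with C2, a narrow constant may also be materialized as a movz;movk pair.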
class NativeMovConstReg: public NativeInstruction {

  NativeMovConstReg *adjust() const {
    return (NativeMovConstReg *)adjust(this);
  }

 public:

  static RawNativeInstruction *adjust(const RawNativeInstruction *ni) {
#ifdef COMPILER1
    // NOP required for C1 patching
    if (ni->is_nop()) {
      return ni->next_raw();
    }
#endif
    return (RawNativeInstruction *)ni;
  }

  intptr_t _data() const;
  void set_data(intptr_t x);

  intptr_t data() const {
    return adjust()->_data();
  }

  bool is_pc_relative() {
    return adjust()->is_ldr_literal();
  }

  void _set_pc_relative_offset(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    nativeLdrLiteral_at(instruction_address())->set_literal_address(addr, pc);
  }

  void set_pc_relative_offset(address addr, address pc) {
    NativeMovConstReg *ni = adjust();
    int dest_adj = ni->instruction_address() - instruction_address();
    ni->_set_pc_relative_offset(addr, pc + dest_adj);
  }

  address _next_instruction_address() const {
#ifdef COMPILER2
    if (is_movz()) {
      // narrow constant
      RawNativeInstruction* ni = next_raw();
      assert(ni->is_movk(), "movz;movk expected");
      return ni->next_raw_instruction_address();
    }
#endif
    assert(is_ldr_literal(), "must be");
    return NativeInstruction::next_raw_instruction_address();
  }

  address next_instruction_address() const {
    return adjust()->_next_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  RawNativeInstruction* ni = rawNativeInstruction_at(address);

  ni = NativeMovConstReg::adjust(ni);

  assert(ni->is_mov_slow() || ni->is_ldr_literal(), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
class NativeJump: public RawNativeJump {
 public:

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_64_HPP