/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_64_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_64_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low-level initial code (usually one
// instruction wide, but some of them were already composed
// instructions). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry points, hiding potential
// back-end extensions and the actual size of the instruction sequence.
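//
// For example, what the front-end handles as a single far call is
// actually emitted as a composed sequence (see is_far_call() below):
//
//   ldr_literal Rx, <destination>    // load the target from a bound literal
//   adr         LR, <return address>
//   br          Rx
//
// (Rx stands for whichever scratch register the back-end picked; with
// COMPILER2 an alternative "ldr_literal; blr; b" form is also recognized.)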
class NativeInstruction;

class RawNativeInstruction {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize,
    instruction_size_in_bits = instruction_size * BitsPerByte,
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  static const int zombie_illegal_instruction = 0xd4000542; // hvc #42

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }

  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

  int encoding() const {
    return *(int*)this;
  }

  void set_encoding(int value) {
    int old = encoding();
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

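  // Each of the predicates below masks out the operand fields of the
  // 32-bit encoding and compares the remaining fixed opcode bits.
  // For example, an unconditional branch B has 000101 in bits [31:26]
  // and a 26-bit immediate below them, hence mask 0xfc000000 and value
  // 0x14000000 in is_b().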
  bool is_nop()                      const { return encoding() == (int)0xd503201f; }
  bool is_b()                        const { return (encoding() & 0xfc000000) == 0x14000000; } // unconditional branch
  bool is_b_cond()                   const { return (encoding() & 0xff000010) == 0x54000000; } // conditional branch
  bool is_bl()                       const { return (encoding() & 0xfc000000) == 0x94000000; }
  bool is_br()                       const { return (encoding() & 0xfffffc1f) == 0xd61f0000; }
  bool is_blr()                      const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
  bool is_ldr_literal()              const { return (encoding() & 0xff000000) == 0x58000000; }
  bool is_adr_aligned()              const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where the label is 4-byte aligned (i.e. an instruction address)
  bool is_adr_aligned_lr()           const { return (encoding() & 0xff00001f) == 0x1000001e; } // adr LR, <label>, where the label is 4-byte aligned (i.e. an instruction address)

  bool is_ldr_str_gp_reg_unsigned_imm()   const { return (encoding() & 0x3f000000) == 0x39000000; } // ldr/str{b, sb, h, sh, _w, sw} Rt, [Rn, #imm]
  bool is_ldr_str_fp_reg_unsigned_imm()   const { return (encoding() & 0x3f000000) == 0x3d000000; } // ldr/str Rt(SIMD), [Rn, #imm]
  bool is_ldr_str_reg_unsigned_imm()      const { return is_ldr_str_gp_reg_unsigned_imm() || is_ldr_str_fp_reg_unsigned_imm(); }

  bool is_stp_preindex()             const { return (encoding() & 0xffc00000) == 0xa9800000; } // stp Xt1, Xt2, [Xn, #imm]!
  bool is_ldp_postindex()            const { return (encoding() & 0xffc00000) == 0xa8c00000; } // ldp Xt1, Xt2, [Xn], #imm
  bool is_mov_sp()                   const { return (encoding() & 0xfffffc00) == 0x91000000; } // mov <Xd|SP>, <Xn|SP> (add <Xd|SP>, <Xn|SP>, #0)
  bool is_movn()                     const { return (encoding() & 0x7f800000) == 0x12800000; }
  bool is_movz()                     const { return (encoding() & 0x7f800000) == 0x52800000; }
  bool is_movk()                     const { return (encoding() & 0x7f800000) == 0x72800000; }
  bool is_orr_imm()                  const { return (encoding() & 0x7f800000) == 0x32000000; }
  bool is_cmp_rr()                   const { return (encoding() & 0x7fe00000) == 0x6b000000; }
  bool is_csel()                     const { return (encoding() & 0x7fe00000) == 0x1a800000; }
  bool is_sub_shift()                const { return (encoding() & 0x7f200000) == 0x4b000000; } // sub Rd, Rn, shift (Rm, imm)
  bool is_mov()                      const { return (encoding() & 0x7fe0ffe0) == 0x2a0003e0; } // mov Rd, Rm (orr Rd, ZR, shift (Rm, 0))
  bool is_tst()                      const { return (encoding() & 0x7f20001f) == 0x6a00001f; } // tst Rn, shift (Rm, imm) (ands ZR, Rn, shift(Rm, imm))
  bool is_lsr_imm()                  const { return (encoding() & 0x7f807c00) == 0x53007c00; } // lsr Rd, Rn, imm (ubfm Rd, Rn, imm, 31/63)

  bool is_far_jump()                 const { return is_ldr_literal() && next_raw()->is_br(); }
  bool is_fat_call()                 const {
    return
#ifdef COMPILER2
      (is_blr() && next_raw()->is_b()) ||
#endif
      (is_adr_aligned_lr() && next_raw()->is_br());
  }
  bool is_far_call()                 const {
    return is_ldr_literal() && next_raw()->is_fat_call();
  }

  bool is_ic_near_call()             const { return is_adr_aligned_lr() && next_raw()->is_b(); }
  bool is_ic_far_call()              const { return is_adr_aligned_lr() && next_raw()->is_ldr_literal() && next_raw()->next_raw()->is_br(); }
  bool is_ic_call()                  const { return is_ic_near_call() || is_ic_far_call(); }
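
  // The composed forms recognized above are:
  //   far_jump:     ldr_literal; br
  //   fat_call:     adr LR; br   (with COMPILER2, also: blr; b)
  //   far_call:     ldr_literal; fat_call
  //   ic_near_call: adr LR; b
  //   ic_far_call:  adr LR; ldr_literal; br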

  bool is_jump()                     const { return is_b() || is_far_jump(); }
  bool is_call()                     const { return is_bl() || is_far_call() || is_ic_call(); }
  bool is_branch()                   const { return is_b() || is_bl(); }

  // C2 doesn't use a fixed register for the safepoint poll address, so
  // the poll instruction cannot be recognized reliably; conservatively
  // treat any instruction as a possible poll.
  bool is_safepoint_poll() const {
    return true;
  }

  bool is_save_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_stp_preindex()) return false; current = current->next_raw();
    for (int i = 28; i >= 0; i -= 2) {
      if (!current->is_stp_preindex()) return false; current = current->next_raw();
    }

    if (!current->is_adr_aligned())                 return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  bool is_restore_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    for (int i = 0; i <= 28; i += 2) {
      if (!current->is_ldp_postindex()) return false; current = current->next_raw();
    }
    if (!current->is_ldp_postindex()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

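  // A literal bound into the instruction stream occupies a word-aligned
  // 8-byte slot, optionally preceded by a padding nop; skipping it thus
  // takes two 4-byte instruction steps after the alignment check.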
  const RawNativeInstruction* skip_bind_literal() const {
    const RawNativeInstruction* current = this;
    if (((uintptr_t)current) % wordSize != 0) {
      assert(current->is_nop(), "should be");
      current = current->next_raw();
    }
    assert(((uintptr_t)current) % wordSize == 0, "should be"); // bound literal should be aligned
    current = current->next_raw()->next_raw();
    return current;
  }

  bool is_stop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_save_all_registers(&current)) return false;
    if (!current->is_ldr_literal())                return false; current = current->next_raw();
    if (!current->is_mov_sp())                     return false; current = current->next_raw();
    if (!current->is_ldr_literal())                return false; current = current->next_raw();
    if (!current->is_br())                         return false; current = current->next_raw();

    current = current->skip_bind_literal();
    current = current->skip_bind_literal();

    *next = (RawNativeInstruction*) current;
    return true;
  }

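  // mov_slow is the back-end's sequence for materializing a 64-bit
  // constant in a register: either a single orr-immediate, or a
  // movn/movz followed by up to three movk instructions, one per
  // remaining 16-bit chunk of the constant.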
  bool is_mov_slow(const RawNativeInstruction** next = NULL) const {
    const RawNativeInstruction* current = this;

    if (current->is_orr_imm()) {
      current = current->next_raw();

    } else if (current->is_movn() || current->is_movz()) {
      current = current->next_raw();
      int movkCount = 0;
      while (current->is_movk()) {
        movkCount++;
        if (movkCount > 3) return false;
        current = current->next_raw();
      }

    } else {
      return false;
    }

    if (next != NULL) {
      *next = (RawNativeInstruction*)current;
    }
    return true;
  }

#ifdef ASSERT
  void skip_verify_heapbase(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (CheckCompressedOops) {
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
      if (!current->is_stp_preindex())      return; current = current->next_raw();
      // NOTE: temporary workaround, remove with m6-01?
      // skip saving condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_mov_slow(&current))  return;
      if (!current->is_cmp_rr())            return; current = current->next_raw();
      if (!current->is_b_cond())            return; current = current->next_raw();
      if (!current->is_stop(&current))      return;

#ifdef COMPILER2
      if (current->is_nop()) current = current->next_raw();
#endif
      // NOTE: temporary workaround, remove with m6-01?
      // skip restoring condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_ldp_postindex())     return; current = current->next_raw();
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
    }

    *next = (RawNativeInstruction*) current;
  }
#endif // ASSERT

  bool is_ldr_global_ptr(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_mov_slow(&current))            return false;
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  void skip_verify_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (VerifyOops) {
      if (!current->is_save_all_registers(&current)) return;

      if (current->is_mov()) {
        current = current->next_raw();
      }

      if (!current->is_mov_sp())                        return; current = current->next_raw();
      if (!current->is_ldr_literal())                   return; current = current->next_raw();
      if (!current->is_ldr_global_ptr(&current))        return;
      if (!current->is_blr())                           return; current = current->next_raw();
      if (!current->is_restore_all_registers(&current)) return;
      if (!current->is_b())                             return; current = current->next_raw();

      current = current->skip_bind_literal();
    }

    *next = (RawNativeInstruction*) current;
  }

  void skip_encode_heap_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
    current->skip_verify_heapbase(&current);
#endif // ASSERT
    current->skip_verify_oop(&current);

    if (Universe::narrow_oop_base() == NULL) {
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm()) return; current = current->next_raw();
      } else {
        if (current->is_mov()) {
          current = current->next_raw();
        }
      }
    } else {
      if (!current->is_tst())       return; current = current->next_raw();
      if (!current->is_csel())      return; current = current->next_raw();
      if (!current->is_sub_shift()) return; current = current->next_raw();
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm())  return; current = current->next_raw();
      }
    }

    *next = (RawNativeInstruction*) current;
  }

  void verify();

  // For unit tests
  static void test() {}

 private:

  void check_bits_range(int bits, int scale, int low_bit) const {
    assert((0 <= low_bit) && (0 < bits) && (low_bit + bits <= instruction_size_in_bits), "invalid bits range");
    assert((0 <= scale) && (scale <= 4), "scale is out of range");
  }

  void set_imm(int imm_encoding, int bits, int low_bit) {
    int imm_mask = right_n_bits(bits) << low_bit;
    assert((imm_encoding & ~imm_mask) == 0, "invalid imm encoding");
    set_encoding((encoding() & ~imm_mask) | imm_encoding);
  }

 protected:

  // Returns signed immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
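  // The implementation shifts the field up to the top of the word, then
  // arithmetic-shifts it back down past low_bit, sign-extending it.
  // E.g. for the 19-bit ldr_literal offset field (bits = 19, scale = 2,
  // low_bit = 5): high_bits_to_clean = 32 - 24 = 8, giving
  // encoding() << 8 >> 13 << 2.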
  int get_signed_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    int high_bits_to_clean = (instruction_size_in_bits - (low_bit + bits));
    return encoding() << high_bits_to_clean >> (high_bits_to_clean + low_bit) << scale;
  }

  // Puts given signed immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_signed_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  // Returns unsigned immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
  int get_unsigned_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    return ((encoding() >> low_bit) & right_n_bits(bits)) << scale;
  }

  // Puts given unsigned immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_unsigned_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_unsigned_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  int get_signed_offset(int bits, int low_bit) const {
    return get_signed_imm(bits, 2, low_bit);
  }

  void set_signed_offset(int offset, int bits, int low_bit) {
    set_signed_imm(offset, bits, 2, low_bit);
  }
};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  RawNativeInstruction* instr = RawNativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------

// Load/store register (unsigned scaled immediate)
class NativeMovRegMem: public RawNativeInstruction {
 private:
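  // Bits [31:30] of a load/store (unsigned immediate) encoding hold the
  // log2 of the access size; the 12-bit immediate at bit 10 is scaled
  // by the access size to form the byte offset.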
  int get_offset_scale() const {
    return get_unsigned_imm(2, 0, 30);
  }

 public:
  int offset() const {
    return get_unsigned_imm(12, get_offset_scale(), 10);
  }

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  const RawNativeInstruction* instr = rawNativeInstruction_at(address);

#ifdef COMPILER1
  // NOP required for C1 patching
  if (instr->is_nop()) {
    instr = instr->next_raw();
  }
#endif

  instr->skip_encode_heap_oop(&instr);

  assert(instr->is_ldr_str_reg_unsigned_imm(), "must be");
  return (NativeMovRegMem*)instr;
}

// -------------------------------------------------------------------

class NativeInstruction : public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined to avoid confusion.
  //
  // The front end and most classes except for those defined in nativeInst_arm
  // or relocInfo_arm should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* instr = NativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------
class NativeInstructionLdrLiteral: public NativeInstruction {
 public:
  address literal_address() {
    address la = instruction_address() + get_signed_offset(19, 5);
    assert(la != instruction_address(), "literal points to instruction");
    return la;
  }

  address after_literal_address() {
    return literal_address() + wordSize;
  }

  void set_literal_address(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    int opc = (encoding() >> 30) & 0x3;
    assert (opc != 0b01 || addr == pc || ((uintx)addr & 7) == 0, "ldr target should be aligned");
    set_signed_offset(addr - pc, 19, 5);
  }

  void set_literal_address(address addr) {
    set_literal_address(addr, instruction_address());
  }

  address literal_value() {
    return *(address*)literal_address();
  }

  void set_literal_value(address dest) {
    *(address*)literal_address() = dest;
  }
};

inline NativeInstructionLdrLiteral* nativeLdrLiteral_at(address address) {
  assert(nativeInstruction_at(address)->is_ldr_literal(), "must be");
  return (NativeInstructionLdrLiteral*)address;
}

// -------------------------------------------------------------------
// Common class for branch instructions with 26-bit immediate offset: B (unconditional) and BL
class NativeInstructionBranchImm26: public NativeInstruction {
 public:
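  // The signed 26-bit word offset gives B/BL a range of +/-128 MB from
  // the instruction address.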
  address destination(int adj = 0) const {
    return instruction_address() + get_signed_offset(26, 0) + adj;
  }

  void set_destination(address dest) {
    intptr_t offset = (intptr_t)(dest - instruction_address());
    assert((offset & 0x3) == 0, "should be aligned");
    set_signed_offset(offset, 26, 0);
  }
};

inline NativeInstructionBranchImm26* nativeB_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

inline NativeInstructionBranchImm26* nativeBL_at(address address) {
  assert(nativeInstruction_at(address)->is_bl(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

// -------------------------------------------------------------------
class NativeInstructionAdrLR: public NativeInstruction {
 public:
  // Returns address which is loaded into LR by this instruction.
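  // Note: is_adr_aligned_lr() guarantees that the low two immediate
  // bits (immlo, bits 30:29) are zero, so reading the 19-bit immhi
  // field at bit 5, scaled by 4, recovers the full adr offset.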
  address target_lr_value() {
    return instruction_address() + get_signed_offset(19, 5);
  }
};

inline NativeInstructionAdrLR* nativeAdrLR_at(address address) {
  assert(nativeInstruction_at(address)->is_adr_aligned_lr(), "must be");
  return (NativeInstructionAdrLR*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
 public:

  address return_address() const {
    if (is_bl()) {
      return next_raw_instruction_address();

    } else if (is_far_call()) {
#ifdef COMPILER2
      if (next_raw()->is_blr()) {
        // ldr_literal; blr; ret_addr: b skip_literal;
        return addr_at(2 * instruction_size);
      }
#endif
      assert(next_raw()->is_adr_aligned_lr() && next_raw()->next_raw()->is_br(), "must be");
      return nativeLdrLiteral_at(instruction_address())->after_literal_address();

    } else if (is_ic_call()) {
      return nativeAdrLR_at(instruction_address())->target_lr_value();

    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return nativeBL_at(instruction_address())->destination(adj);

    } else if (is_far_call()) {
      return nativeLdrLiteral_at(instruction_address())->literal_value();

    } else if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        return nativeB_at(next->instruction_address())->destination(adj);
      } else if (next->is_far_jump()) {
        // ic_far_call
        return nativeLdrLiteral_at(next->instruction_address())->literal_value();
      }
    }
    ShouldNotReachHere();
    return NULL;
  }

  void set_destination(address dest) {
    if (is_bl()) {
      nativeBL_at(instruction_address())->set_destination(dest);
      return;
    }
    if (is_far_call()) {
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
      OrderAccess::storeload(); // overkill if caller holds lock?
      return;
    }
    if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        nativeB_at(next->instruction_address())->set_destination(dest);
        return;
      }
      if (next->is_far_jump()) {
        // ic_far_call
        nativeLdrLiteral_at(next->instruction_address())->set_literal_value(dest);
        OrderAccess::storeload(); // overkill if caller holds lock?
        return;
      }
    }
    ShouldNotReachHere();
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "call target should be from code cache (required by ic_call and patchable_call)");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call(), "should be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }
};

inline RawNativeCall* rawNativeCall_at(address address) {
  RawNativeCall * call = (RawNativeCall*)address;
  call->verify();
  return call;
}
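
// Illustrative use (the names 'pc' and 'new_target' are hypothetical):
//
//   RawNativeCall* call = rawNativeCall_at(pc);
//   call->set_destination_mt_safe(new_target); // new_target must be in the code cache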

class NativeCall: public RawNativeCall {
 public:

  // NativeCall::next_instruction_address() is used only to define the
  // range in which to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated with the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  static bool is_call_before(address return_address);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall * call = (NativeCall*)address;
  call->verify();
  return call;
}

NativeCall* nativeCall_before(address return_address);

// -------------------------------------------------------------------
class NativeGeneralJump: public NativeInstruction {
 public:

  address jump_destination() const {
    return nativeB_at(instruction_address())->destination();
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);

  static void insert_unconditional(address code_pos, address entry);

};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    if (is_b()) {
      address a = nativeB_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
      return a;
    } else {
      assert(is_far_jump(), "should be");
      return nativeLdrLiteral_at(instruction_address())->literal_value();
    }
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      nativeB_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_far_jump(), "should be");
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
    }
  }
};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
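// NativeMovConstReg fronts either a pc-relative ldr_literal or a
// mov_slow immediate sequence (see the assert in nativeMovConstReg_at),
// possibly preceded by a nop reserved for C1 patching, which adjust()
// skips over.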
class NativeMovConstReg: public NativeInstruction {

  NativeMovConstReg *adjust() const {
    return (NativeMovConstReg *)adjust(this);
  }

 public:

  static RawNativeInstruction *adjust(const RawNativeInstruction *ni) {
#ifdef COMPILER1
    // NOP required for C1 patching
    if (ni->is_nop()) {
      return ni->next_raw();
    }
#endif
    return (RawNativeInstruction *)ni;
  }

  intptr_t _data() const;
  void set_data(intptr_t x);

  intptr_t data() const {
    return adjust()->_data();
  }

  bool is_pc_relative() {
    return adjust()->is_ldr_literal();
  }

  void _set_pc_relative_offset(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    nativeLdrLiteral_at(instruction_address())->set_literal_address(addr, pc);
  }

  void set_pc_relative_offset(address addr, address pc) {
    NativeMovConstReg *ni = adjust();
    int dest_adj = ni->instruction_address() - instruction_address();
    ni->_set_pc_relative_offset(addr, pc + dest_adj);
  }

  address _next_instruction_address() const {
#ifdef COMPILER2
    if (is_movz()) {
      // narrow constant
      RawNativeInstruction* ni = next_raw();
      assert(ni->is_movk(), "movz;movk expected");
      return ni->next_raw_instruction_address();
    }
#endif
    assert(is_ldr_literal(), "must be");
    return NativeInstruction::next_raw_instruction_address();
  }

  address next_instruction_address() const {
    return adjust()->_next_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  RawNativeInstruction* ni = rawNativeInstruction_at(address);

  ni = NativeMovConstReg::adjust(ni);

  assert(ni->is_mov_slow() || ni->is_ldr_literal(), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
class NativeJump: public RawNativeJump {
 public:

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_64_HPP