/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2015, Linaro Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// LIRAssembler fills the patching site with nops up to NativeCall::instruction_size
int NativeCall::instruction_size = 5 * arm_insn_sz;
#define patching_copy_buff_len (NativeCall::instruction_size)
NativeInstruction* NativeInstruction::from(address addr) {
  return (NativeInstruction*) addr;
}

//-------------------------------------------------------------------

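// A call site is sized by how the target address gets materialized:
// with movw/movt available (ARMv6T2/ARMv7) it is movw + movt + blx reg,
// i.e. 3 words; otherwise the address is built with mov + 3 x orr
// followed by blx reg, i.e. 5 words (see NativeMovConstReg below).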
void NativeCall::init() {
  instruction_size = (VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7) ? 3 : 5) * arm_insn_sz;
}

void NativeCall::verify() {
  if (!is_call()) {
    fatal("not a call");
  }
}

address NativeCall::destination() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->destination();
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeCall::set_destination(address dest) {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    NativeImmCall::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_call(), "not a call");

  // Patching must be not only safe (this call could be executed by some
  // thread while we patch it), but also atomic: another thread calling
  // NativeCall::destination() concurrently must always see a valid value.

  if (NativeImmCall::is_at(addr())) {
    NativeImmCall::from(addr())->set_destination(dest);
    ICache::invalidate_word(addr());
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}

bool NativeCall::is_call_before(address return_address) {
  return is_at(return_address - NativeImmCall::instruction_size) ||
    is_at(return_address - NativeCall::instruction_size);
}

address NativeCall::next_instruction_address() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr());
    address next_instr = nm->next_instruction_address();
    assert(NativeRegCall::is_at(next_instr), "should be");
    return NativeRegCall::from(next_instr)->next_instruction_address();
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->next_instruction_address();
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

address NativeCall::return_address() const {
  return next_instruction_address();
}

bool NativeCall::is_at(address addr) {
  if (NativeImmCall::is_at(addr)) {
    return true;
  } else if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegCall::is_at(next_instr) &&
      NativeRegCall::from(next_instr)->destination() == nm->destination();
  } else if (NativeTrampolineCall::is_at(addr)) {
    return true;
  }
  return false;
}

NativeCall* NativeCall::from(address addr) {
  assert(NativeCall::is_at(addr), "");
  return (NativeCall*) addr;
}

//-------------------------------------------------------------------

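// Trampoline call site layout, as matched by is_at() below:
//   add lr, pc, #disp     @ compute the return address
//   ldr pc, [pc, #-4]     @ load the target from the following word
//   .word <destination>   @ patchable target, read and written at offset 8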
address NativeTrampolineCall::destination() const {
  assert(is_at(addr()), "not a trampoline call");
  return (address) uint_at(8);
}

void NativeTrampolineCall::set_destination(address dest) {
  assert(is_at(addr()), "not a trampoline call");
  set_uint_at(8, (uintptr_t) dest);
}

void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not a trampoline call");
  set_destination(dest);
  // FIXME invalidate data cache
}

bool NativeTrampolineCall::is_at(address addr) {
  return (as_uint(addr)    & ~0xffu) == 0xe28fe000   // add lr, pc, #disp
      && as_uint(addr + 4)           == 0xe51ff004;  // ldr pc, [pc, #-4]
}

NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
  assert(NativeTrampolineCall::is_at(addr), "");
  return (NativeTrampolineCall*) addr;
}

//-------------------------------------------------------------------

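// An immediate call is a BL instruction: cond | 0b1011 | imm24, where
// imm24 is a signed word offset relative to pc, which reads as the
// instruction address + 8 on ARM.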
address NativeImmCall::destination() const {
  assert(is_imm_call(), "not an immediate call");
  uint32_t insn = as_uint();
  intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
  address destination = addr() + 8 + (off << 2);
  return destination;
}

void NativeImmCall::set_destination(address dest) {
  assert(is_imm_call(), "not an immediate call");
  patch_offset_to(dest);
}

bool NativeImmCall::is_at(address addr) {
  return Instruction_aarch32::extract(as_uint(addr), 27, 24) == 0b1011;
}

NativeImmCall* NativeImmCall::from(address addr) {
  assert(NativeImmCall::is_at(addr), "");
  return (NativeImmCall*) addr;
}

//-------------------------------------------------------------------

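// A register call is BLX reg: the register-branch encoding checked by
// NativeBranchType::is_branch_type() below, with bits 7..4 == 0b0011 and
// the target register in bits 3..0.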
Register NativeRegCall::destination() const {
  assert(is_reg_call(), "not a register call");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

bool NativeRegCall::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
}

NativeRegCall* NativeRegCall::from(address addr) {
  assert(NativeRegCall::is_at(addr), "");
  return (NativeRegCall*) addr;
}

//-------------------------------------------------------------------
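// NativeMovConstReg covers the three ways a 32-bit constant is put into
// a register here:
//   - ldr reg, [pc, #offset]   (literal load)
//   - movw + movt              (ARMv6T2/ARMv7)
//   - mov + 3 x orr            (constant built in chunks on older cores)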

void NativeMovConstReg::verify() {
  if (!is_mov_const_reg()) {
    fatal("not a mov const reg");
  }
}

intptr_t NativeMovConstReg::data() const {
  return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
}

void NativeMovConstReg::set_data(intptr_t x) {
  MacroAssembler::pd_patch_instruction(addr(), (address)x);
  ICache::invalidate_range(addr(), max_instruction_size);
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(addr()), data());
}

Register NativeMovConstReg::destination() const {
  return (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
}

NativeMovConstReg* NativeMovConstReg::from(address addr) {
  assert(NativeMovConstReg::is_at(addr), "");
  return (NativeMovConstReg*) addr;
}

bool NativeMovConstReg::is_movw_movt_at(address addr) {
  unsigned insn = as_uint(addr);
  unsigned insn2 = as_uint(addr + arm_insn_sz);
  return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && // movw
         Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   // movt
}

bool NativeMovConstReg::is_ldr_literal_at(address addr) {
  unsigned insn = as_uint(addr);
  return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == 0b010000011111;
}

bool NativeMovConstReg::is_mov_n_three_orr_at(address addr) {
  return (Instruction_aarch32::extract(as_uint(addr), 27, 16) & 0b111111101111) == 0b001110100000 &&
          Instruction_aarch32::extract(as_uint(addr + arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr + 2 * arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr + 3 * arm_insn_sz), 27, 21) == 0b0011100;
}

bool NativeMovConstReg::is_at(address addr) {
  return is_ldr_literal_at(addr) ||
          is_movw_movt_at(addr) ||
          is_mov_n_three_orr_at(addr);
}

//-------------------------------------------------------------------
// TODO review
address NativeMovRegMem::instruction_address() const {
  return addr();
}

int NativeMovRegMem::offset() const {
  address pc = addr();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch32::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(addr());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = addr();
  // FIXME this seems not very robust
  MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
  ICache::invalidate_range(addr(), instruction_size);
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(addr());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (!is_jump()) {
    fatal("not a jump");
  }
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

address NativeJump::jump_destination() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeJump::set_jump_destination(address dest) {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    NativeImmJump::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else {
    ShouldNotReachHere();
  }
}

address NativeJump::next_instruction_address() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
    assert(NativeRegJump::is_at(after_move), "should be jump");
    return NativeRegJump::from(after_move)->next_instruction_address();
  }
  ShouldNotReachHere();
  return NULL;
}

bool NativeJump::is_at(address addr) {
  if (NativeImmJump::is_at(addr)) {
    return true;
  }
  if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegJump::is_at(next_instr) &&
      NativeRegJump::from(next_instr)->destination() == nm->destination();
  }
  return false;
}

NativeJump* NativeJump::from(address addr) {
  assert(NativeJump::is_at(addr), "");
  return (NativeJump*) addr;
}

// MT-safe insertion of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "AArch32 cannot replace a non-jump with a jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

//-------------------------------------------------------------------

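// Matches the register-branch family: bits 27..20 == 0b00010010 and
// bits 19..8 == 0b111111111111, with bits 7..4 selecting the flavor
// (BX is 0b0001, BLX is 0b0011).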
bool NativeBranchType::is_branch_type(uint32_t insn) {
  return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
    Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
}

void NativeBranchType::patch_offset_to(address dest) {
  uint32_t insn = as_uint();
  const intptr_t off = (dest - (addr() + 8));
  assert((off & 3) == 0, "should be");
  assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1024,
      "new offset should fit in instruction");

  const unsigned off_mask = ((1U << 24) - 1);
  insn &= ~off_mask; // mask off offset part
  insn |= ((unsigned) off >> 2) & off_mask;

  set_uint(insn);
  ICache::invalidate_range(addr_at(0), instruction_size);
}

//-------------------------------------------------------------------

address NativeImmJump::destination() const {
  assert(is_imm_jump(), "not an immediate jump");
  return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
}

void NativeImmJump::set_destination(address addr) {
  assert(is_imm_jump(), "");
  patch_offset_to(addr);
}

bool NativeImmJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return Instruction_aarch32::extract(insn, 27, 24) == 0b1010;
}

NativeImmJump* NativeImmJump::from(address addr) {
  assert(NativeImmJump::is_at(addr), "");
  return (NativeImmJump*) addr;
}

//-------------------------------------------------------------------

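// A register jump is BX reg: the same register-branch family as above,
// with bits 7..4 == 0b0001.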
bool NativeRegJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
}

NativeRegJump* NativeRegJump::from(address addr) {
  assert(NativeRegJump::is_at(addr), "");
  return (NativeRegJump*) addr;
}

Register NativeRegJump::destination() const {
  assert(is_reg_jump(), "");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // A safepoint poll is implemented in two steps, materializing the
  // polling page address and then loading from it:
  //
  //   movw(r9, polling_page & 0xffff);
  //   movt(r9, polling_page >> 16);
  //   ldr(r9, [r9, #0]);
  //
  // We can rely on this instruction sequence as long as we only have C1.

  if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
    bool res = false;
    unsigned paddr = (unsigned)os::get_polling_page();

    unsigned addr_lo = paddr & 0xffff;
    unsigned addr_hi = paddr >> 16;

    Register scratch = rscratch1;

    res =        from(addr() - 0x8)->is_movw(scratch, addr_lo);
    res = res && from(addr() - 0x4)->is_movt(scratch, addr_hi);
    res = res && from(addr() - 0x0)->is_ldr(scratch, Address(scratch));

    return res;
  } else {
    assert(false, "not implemented");
    return false;
  }
}

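// movw/movt carry a 16-bit immediate split as imm4:imm12 (bits 19..16
// and 11..0), with the destination register in bits 15..12 and the
// condition in bits 31..28.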
bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}

bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}

bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
  assert(addr.get_mode() == Address::imm, "unimplemented");
  assert(addr.get_wb_mode() == Address::off, "unimplemented");
  assert(addr.index() == noreg, "unimplemented");
  assert(addr.offset() == 0, "unimplemented");

  bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; // P
  bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; // U
  bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; // W
  bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
  bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;

  bool a1 = b0 && b1 && b2 && b3 && b4; // address encoding

  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;
  bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;
  bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010;
  bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5 && a6;
}

bool NativeInstruction::is_movt() {
  return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
}

bool NativeInstruction::is_orr() {
  return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}

//-------------------------------------------------------------------

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.b(entry);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
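// The code buffer holds patching_copy_buff_len bytes of instructions
// followed by a "patching switch" branch that initially targets the next
// instruction in the buffer. The switch is first redirected to the
// instruction following the patched region in the original code, and only
// then is the site itself turned into a jump to the buffer, so any thread
// executing the site observes a consistent sequence at every step.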
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  const address patching_switch_addr = code_buffer + patching_copy_buff_len;
  NativeImmJump* patching_switch = NativeImmJump::from(patching_switch_addr);
  assert(!NativeInstruction::from(instr_addr)->is_patched_already(), "should not be patched yet");
  assert(patching_switch->destination() == patching_switch_addr + NativeInstruction::arm_insn_sz,
         "switch should be a branch to the next instruction at this point");
  patching_switch->set_destination(instr_addr + patching_copy_buff_len);
  ICache::invalidate_word(patching_switch_addr);

  NativeImmJump* nj = NativeImmJump::from(instr_addr); // checks that it is a jump
  nj->set_destination(code_buffer);
  ICache::invalidate_word(instr_addr);

  assert(NativeInstruction::from(instr_addr)->is_patched_already(), "should be patched already");
}

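// A site counts as patched already when it starts with a jump into a copy
// buffer whose patching switch branches back just past the patched region,
// i.e. the exact layout set up by replace_mt_safe() above.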
bool NativeInstruction::is_patched_already() const {
  if (NativeImmJump::is_at(addr())) {
    address maybe_copy_buff = NativeImmJump::from(addr())->destination();
    address maybe_patching_switch = maybe_copy_buff + patching_copy_buff_len;
    if (NativeImmJump::is_at(maybe_patching_switch)) {
      return NativeImmJump::from(maybe_patching_switch)->destination() == addr() + patching_copy_buff_len;
    }
  }
  return false;
}