/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2015, Linaro Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// LIRAssembler fills patching site with nops up to NativeCall::instruction_size
int NativeCall::instruction_size = 5 * arm_insn_sz;
#define patching_copy_buff_len (NativeCall::instruction_size)

NativeInstruction* NativeInstruction::from(address addr) {
  return (NativeInstruction*) addr;
}

//-------------------------------------------------------------------

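// A call site takes one of the three shapes recognized by is_at() below:
// a plain immediate BL, a constant load into a register followed by a
// register call, or a trampoline call.  On ARMv6T2/ARMv7 the constant can
// be materialized with movw/movt (2 instructions + 1 register call =
// 3 words); older cores fall back to mov + 3 x orr (4 instructions +
// 1 register call = 5 words), hence the 3-vs-5 choice below.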
void NativeCall::init() {
  instruction_size = (VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7) ? 3 : 5) * arm_insn_sz;
}

void NativeCall::verify() {
  if (!is_call()) {
    fatal("not a call");
  }
}

address NativeCall::destination() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->destination();
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeCall::set_destination(address dest) {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    NativeImmCall::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_call(), "not a call");

  // Patching must not only be safe (i.e. the call may be executed by some
  // thread while it is being patched), it must also be atomic: another
  // thread calling NativeCall::destination() concurrently must always see
  // a valid destination value.

  if (NativeImmCall::is_at(addr())) {
    assert(false, "could be patched in an mt-safe way, but should not be requested to be. "
           "Known mt-safe requests have an arbitrary destination offset; "
           "use trampoline_call for those.");
    ShouldNotCallThis();
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}

bool NativeCall::is_call_before(address return_address) {
  return is_at(return_address - NativeImmCall::instruction_size) ||
    is_at(return_address - NativeCall::instruction_size);
}

address NativeCall::next_instruction_address() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr());
    address next_instr = nm->next_instruction_address();
    assert(NativeRegCall::is_at(next_instr), "should be");
    return NativeRegCall::from(next_instr)->next_instruction_address();
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->next_instruction_address();
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

address NativeCall::return_address() const {
  return next_instruction_address();
}

bool NativeCall::is_at(address addr) {
  if (NativeImmCall::is_at(addr)) {
    return true;
  } else if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegCall::is_at(next_instr) &&
      NativeRegCall::from(next_instr)->destination() == nm->destination();
  } else if (NativeTrampolineCall::is_at(addr)) {
    return true;
  }
  return false;
}

NativeCall* NativeCall::from(address addr) {
  assert(NativeCall::is_at(addr), "");
  return (NativeCall*) addr;
}

//-------------------------------------------------------------------

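// A trampoline call site is laid out as (a sketch; #disp is whatever the
// assembler emitted, is_at() below only checks the opcode bits):
//
//   add  lr, pc, #disp      // lr := return address (ARM pc reads as . + 8)
//   ldr  pc, [pc, #-4]      // jump via the data word that follows
//   .word <destination>     // patchable target, accessed as uint_at(8)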
address NativeTrampolineCall::destination() const {
  assert(is_at(addr()), "not a trampoline call");
  return (address) uint_at(8);
}

void NativeTrampolineCall::set_destination(address dest) {
  assert(is_at(addr()), "not a trampoline call");
  set_uint_at(8, (uintptr_t) dest);
}

void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not a trampoline call");
  set_destination(dest);
  // FIXME invalidate data cache
}

bool NativeTrampolineCall::is_at(address addr) {
  return (as_uint(addr    ) & ~0xffu) == 0xe28fe000  // add     lr, pc, #disp
       && as_uint(addr + 4)          == 0xe51ff004; // ldr     pc, [pc, -4]
}

NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
  assert(NativeTrampolineCall::is_at(addr), "");
  return (NativeTrampolineCall*) addr;
}

//-------------------------------------------------------------------

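// An immediate call is a BL instruction: a signed 24-bit word offset
// relative to the instruction address + 8 (ARM-state pc reads two
// instructions ahead), hence the 'addr() + 8 + (off << 2)' arithmetic.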
address NativeImmCall::destination() const {
  assert(is_imm_call(), "not an immediate call");
  uint32_t insn = as_uint();
  intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
  address destination = addr() + 8 + (off << 2);
  return destination;
}

void NativeImmCall::set_destination(address dest) {
  assert(is_imm_call(), "not an immediate call");
  patch_offset_to(dest);
}

bool NativeImmCall::is_at(address addr) {
  return Instruction_aarch32::extract(as_uint(addr), 27, 24) == 0b1011;
}

NativeImmCall* NativeImmCall::from(address addr) {
  assert(NativeImmCall::is_at(addr), "");
  return (NativeImmCall*) addr;
}

//-------------------------------------------------------------------

Register NativeRegCall::destination() const {
  assert(is_reg_call(), "not a register call");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

bool NativeRegCall::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
}

NativeRegCall* NativeRegCall::from(address addr) {
  assert(NativeRegCall::is_at(addr), "");
  return (NativeRegCall*) addr;
}

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (!is_mov_const_reg()) {
    fatal("not a mov const reg");
  }
}

intptr_t NativeMovConstReg::data() const {
  return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
}

void NativeMovConstReg::set_data(intptr_t x) {
  MacroAssembler::pd_patch_instruction(addr(), (address)x);
  ICache::invalidate_range(addr(), max_instruction_size);
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(addr()), data());
}

Register NativeMovConstReg::destination() const {
  return (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
}

NativeMovConstReg* NativeMovConstReg::from(address addr) {
  assert(NativeMovConstReg::is_at(addr), "");
  return (NativeMovConstReg*) addr;
}

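// The three constant-materialization shapes matched below, sketched in
// assembly (register and constant values are illustrative only):
//
//   ldr literal:           ldr  r0, [pc, #off]
//   movw/movt (ARMv6T2+):  movw r0, #0x5678
//                          movt r0, #0x1234
//   mov + 3 x orr:         mov  r0, #0x78
//                          orr  r0, r0, #0x5600
//                          orr  r0, r0, #0x340000
//                          orr  r0, r0, #0x12000000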
bool NativeMovConstReg::is_movw_movt_at(address addr) {
  unsigned insn = as_uint(addr);
  unsigned insn2 = as_uint(addr + arm_insn_sz);
  return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && // movw
         Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   // movt
}

bool NativeMovConstReg::is_ldr_literal_at(address addr) {
  unsigned insn = as_uint(addr);
  return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == 0b010000011111;
}

bool NativeMovConstReg::is_mov_n_three_orr_at(address addr) {
  return (Instruction_aarch32::extract(as_uint(addr), 27, 16) & 0b111111101111) == 0b001110100000 &&
          Instruction_aarch32::extract(as_uint(addr+arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr+2*arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr+3*arm_insn_sz), 27, 21) == 0b0011100;
}

bool NativeMovConstReg::is_at(address addr) {
  return is_ldr_literal_at(addr) ||
          is_movw_movt_at(addr) ||
          is_mov_n_three_orr_at(addr);
}

//-------------------------------------------------------------------
// TODO review
address NativeMovRegMem::instruction_address() const {
  return addr();
}

int NativeMovRegMem::offset() const {
  address pc = addr();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch32::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(addr());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = addr();
  // FIXME this does not look very robust
  MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
  ICache::invalidate_range(addr(), instruction_size);
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(addr());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (!is_jump()) {
    fatal("not a jump");
  }
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

address NativeJump::jump_destination() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeJump::set_jump_destination(address dest) {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    NativeImmJump::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else {
    ShouldNotReachHere();
  }
}

address NativeJump::next_instruction_address() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
    assert(NativeRegJump::is_at(after_move), "should be jump");
    return NativeRegJump::from(after_move)->next_instruction_address();
  }
  ShouldNotReachHere();
  return NULL;
}

bool NativeJump::is_at(address addr) {
  if (NativeImmJump::is_at(addr)) {
    return true;
  }
  if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegJump::is_at(next_instr) &&
      NativeRegJump::from(next_instr)->destination() == nm->destination();
  }
  return false;
}

NativeJump* NativeJump::from(address addr) {
  assert(NativeJump::is_at(addr), "");
  return (NativeJump*) addr;
}

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

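// The branch assembled below is an unconditional B: cond = AL (0b1110) in
// bits 31..28, opcode 0b1010 in bits 27..24 (bit 24 is the link bit, 0 for
// B), and a signed 24-bit word offset relative to the instruction
// address + 8 in bits 23..0.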
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch32 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

//-------------------------------------------------------------------

bool NativeBranchType::is_branch_type(uint32_t insn) {
  return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
    Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
}

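// B and BL keep their target in a signed 24-bit word-offset field
// (bits 23..0), measured from the instruction address + 8, so the reachable
// range is +/- 32 MB; patch_offset_to() rewrites only that field.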
void NativeBranchType::patch_offset_to(address dest) {
  uint32_t insn = as_uint();
  const intptr_t off = (dest - (addr() + 8));
  assert((off & 3) == 0, "should be");
  assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1024,
      "new offset should fit in instruction");

  const unsigned off_mask = ((1U << 24) - 1);
  insn &= ~off_mask; // mask off offset part
  insn |= ((unsigned) off >> 2) & off_mask;

  set_uint(insn);
  ICache::invalidate_range(addr_at(0), instruction_size);
}

//-------------------------------------------------------------------

address NativeImmJump::destination() const {
  assert(is_imm_jump(), "not an immediate jump");
  return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
}

void NativeImmJump::set_destination(address addr) {
  assert(is_imm_jump(), "");
  patch_offset_to(addr);
}

bool NativeImmJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return Instruction_aarch32::extract(insn, 27, 24) == 0b1010;
}

NativeImmJump* NativeImmJump::from(address addr) {
  assert(NativeImmJump::is_at(addr), "");
  return (NativeImmJump*) addr;
}

//-------------------------------------------------------------------

bool NativeRegJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
}

NativeRegJump* NativeRegJump::from(address addr) {
  assert(NativeRegJump::is_at(addr), "");
  return (NativeRegJump*) addr;
}

Register NativeRegJump::destination() const {
  assert(is_reg_jump(), "");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // A safepoint poll is implemented as
  //
  // movw(r9, polling_page & 0xffff);
  // movt(r9, polling_page >> 16);
  // ldr(r9, [r9, #0]);
  //
  // We can rely on this instruction order as long as C1 is the only compiler.

  if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
    bool res = false;
    unsigned paddr = (unsigned)os::get_polling_page();

    unsigned addr_lo = paddr & 0xffff;
    unsigned addr_hi = paddr >> 16;

    Register scratch = rscratch1;

    res =        from(addr() - 0x8)->is_movw(scratch, addr_lo);
    res = res && from(addr() - 0x4)->is_movt(scratch, addr_hi);
    res = res && from(addr() - 0x0)->is_ldr(scratch, Address(scratch));

    return res;
  } else {
    assert(false, "not implemented");
    return false;
  }
}

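// Field layout shared by the movw/movt matchers below (ARM encoding A1):
//   cond[31:28] | opc[27:20] | imm4[19:16] | Rd[15:12] | imm12[11:0]
// where the 16-bit immediate is imm4:imm12 and opc distinguishes
// movw (0b00110000) from movt (0b00110100).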
bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}

bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}

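// LDR (immediate) field layout checked below (ARM encoding A1):
//   cond[31:28] | 010[27:25] | P U 0 W L | Rn[19:16] | Rt[15:12] | imm12[11:0]
// Only the plain 'ldr Rt, [Rn]' form is accepted: P=1, U=1, W=0, L=1,
// imm12=0 (bit 22, the byte/word bit, must also be 0).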
bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
  assert(addr.get_mode() == Address::imm, "unimplemented");
  assert(addr.get_wb_mode() == Address::off, "unimplemented");
  assert(addr.index() == noreg, "unimplemented");
  assert(addr.offset() == 0, "unimplemented");

  bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; // P
  bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; // U
  bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; // W
  bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
  bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;

  bool a1 = b0 && b1 && b2 && b3 && b4; // address encoding

  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;
  bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;
  bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010;
  bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5 && a6;
}

bool NativeInstruction::is_movt() {
  return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
}

bool NativeInstruction::is_orr() {
  return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}

//-------------------------------------------------------------------

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.b(entry);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
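// The code_buffer is expected to contain a copy of the instructions being
// replaced (patching_copy_buff_len bytes) followed by a "patching switch"
// jump that initially branches to the instruction right after itself.  The
// switch is redirected into the patched method first; only then is the
// original site redirected into the buffer, so concurrent threads always
// see a consistent instruction stream.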
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  const address patching_switch_addr = code_buffer + patching_copy_buff_len;
  NativeImmJump* patching_switch = NativeImmJump::from(patching_switch_addr);
  assert(!NativeInstruction::from(instr_addr)->is_patched_already(), "should not be patched yet");
  assert(patching_switch->destination() == patching_switch_addr + NativeInstruction::arm_insn_sz,
         "switch should be a branch to the next instruction at this point");
  patching_switch->set_destination(instr_addr + patching_copy_buff_len);
  ICache::invalidate_word(patching_switch_addr);

  NativeImmJump* nj = NativeImmJump::from(instr_addr); // checking that it is a jump
  nj->set_destination(code_buffer);
  ICache::invalidate_word(instr_addr);

  assert(NativeInstruction::from(instr_addr)->is_patched_already(), "should be patched already");
}

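// A site counts as patched when it branches to a copy buffer whose patching
// switch (see replace_mt_safe() above) has already been redirected back past
// the copied instructions.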
bool NativeInstruction::is_patched_already() const {
  if (NativeImmJump::is_at(addr())) {
    address maybe_copy_buff = NativeImmJump::from(addr())->destination();
    address maybe_patching_switch = maybe_copy_buff + patching_copy_buff_len;
    if (NativeImmJump::is_at(maybe_patching_switch)) {
      return NativeImmJump::from(maybe_patching_switch)->destination() == addr() + patching_copy_buff_len;
    }
  }
  return false;
}