/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2015, Linaro Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// LIRAssembler fills the patching site with nops up to NativeCall::instruction_size
static const int patching_copy_buff_len = NativeCall::instruction_size;

NativeInstruction* NativeInstruction::from(address addr) {
  return (NativeInstruction*) addr;
}

//-------------------------------------------------------------------

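// A call site takes one of three forms, distinguished by the checks below:
// an immediate bl (NativeImmCall), a movw/movt constant load followed by a
// register call (NativeMovConstReg + NativeRegCall), or a trampoline call
// (NativeTrampolineCall). The accessors dispatch on whichever form is present.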
void NativeCall::verify() {
  if (!is_call()) {
    fatal("not a call");
  }
}

address NativeCall::destination() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->destination();
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeCall::set_destination(address dest) {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    NativeImmCall::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_call(), "not a call");

  // Patching must not only be safe (i.e. the call could be executed by some
  // thread while it is being patched), it must also be atomic: another thread
  // calling NativeCall::destination() must always see a valid destination.

  if (NativeImmCall::is_at(addr())) {
    assert(false, "could be patched in an mt-safe way, but should not be requested to. "
           "Known mt-safe requests have an arbitrary destination offset. "
           "Use trampoline_call for this.");
    ShouldNotCallThis();
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  } else {
    ShouldNotReachHere();
  }
}

void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}

bool NativeCall::is_call_before(address return_address) {
  return is_at(return_address - NativeImmCall::instruction_size) ||
    is_at(return_address - NativeCall::instruction_size);
}

address NativeCall::next_instruction_address() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr());
    address next_instr = nm->next_instruction_address();
    assert(NativeRegCall::is_at(next_instr), "should be");
    return NativeRegCall::from(next_instr)->next_instruction_address();
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->next_instruction_address();
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

address NativeCall::return_address() const {
  return next_instruction_address();
}

bool NativeCall::is_at(address addr) {
  if (NativeImmCall::is_at(addr)) {
    return true;
  } else if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegCall::is_at(next_instr) &&
      NativeRegCall::from(next_instr)->destination() == nm->destination();
  } else if (NativeTrampolineCall::is_at(addr)) {
    return true;
  }
  return false;
}

NativeCall* NativeCall::from(address addr) {
  assert(NativeCall::is_at(addr), "");
  return (NativeCall*) addr;
}

//-------------------------------------------------------------------

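// Trampoline call layout (matched by is_at below):
//   0x0: add lr, pc, #4    ; lr = addr + 12, the instruction after the data word
//   0x4: ldr pc, [pc, #-4] ; load the data word at addr + 8 (ARM-state pc reads as insn + 8)
//   0x8: <destination>     ; 32-bit target address, accessed via uint_at(8)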
address NativeTrampolineCall::destination() const {
  assert(is_at(addr()), "not a call");
  return (address) uint_at(8);
}

void NativeTrampolineCall::set_destination(address dest) {
  assert(is_at(addr()), "not a call");
  set_uint_at(8, (uintptr_t) dest);
}

void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not a call");
  set_destination(dest);
  // FIXME invalidate data cache
}

bool NativeTrampolineCall::is_at(address addr) {
  return as_uint(addr    ) == 0xe28fe004    // add     lr, pc, #4
      && as_uint(addr + 4) == 0xe51ff004;   // ldr     pc, [pc, #-4]
}

NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
  assert(NativeTrampolineCall::is_at(addr), "");
  return (NativeTrampolineCall*) addr;
}

//-------------------------------------------------------------------

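// BL (immediate): cond | 1011 | imm24. The 24-bit field is a signed word
// offset relative to pc + 8 (ARM-state pipeline offset), hence the
// "addr() + 8 + (off << 2)" arithmetic below.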
address NativeImmCall::destination() const {
  assert(is_imm_call(), "not a call");
  uint32_t insn = as_uint();
  intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
  address destination = addr() + 8 + (off << 2);
  return destination;
}

void NativeImmCall::set_destination(address dest) {
  assert(is_imm_call(), "not a call");
  patch_offset_to(dest);
}

bool NativeImmCall::is_at(address addr) {
  return Instruction_aarch32::extract(as_uint(addr), 27, 24) == 0b1011;
}

NativeImmCall* NativeImmCall::from(address addr) {
  assert(NativeImmCall::is_at(addr), "");
  return (NativeImmCall*) addr;
}

//-------------------------------------------------------------------

Register NativeRegCall::destination() const {
  assert(is_reg_call(), "not a call");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

bool NativeRegCall::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
}

NativeRegCall* NativeRegCall::from(address addr) {
  assert(NativeRegCall::is_at(addr), "");
  return (NativeRegCall*) addr;
}

//-------------------------------------------------------------------

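// The constant is materialized either as a movw/movt pair or as a pc-relative
// ldr literal; is_at() accepts both forms (see is_movw_movt_at and
// is_ldr_literal_at below).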
void NativeMovConstReg::verify() {
  if (!is_mov_const_reg()) {
    fatal("not a mov const reg");
  }
}

intptr_t NativeMovConstReg::data() const {
  // FIXME seems not very robust
  // das(uint64_t(addr()),2);
  return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
}

void NativeMovConstReg::set_data(intptr_t x) {
  // FIXME seems not very robust
  MacroAssembler::pd_patch_instruction(addr(), (address)x);
  ICache::invalidate_range(addr(), max_instruction_size);
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(addr()), data());
}

Register NativeMovConstReg::destination() const {
  Register d = (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
  assert(d == (Register) Instruction_aarch32::extract(as_uint(addr() + arm_insn_sz), 15, 12),
      "movw and movt should load the same register");
  return d;
}

NativeMovConstReg* NativeMovConstReg::from(address addr) {
  assert(NativeMovConstReg::is_at(addr), "");
  return (NativeMovConstReg*) addr;
}

bool NativeMovConstReg::is_movw_movt_at(address addr) {
  // Note: also reads the following word, which may run past the end of the
  // code if called on the last instruction.
  unsigned insn = as_uint(addr);
  unsigned insn2 = as_uint(addr + arm_insn_sz);
  return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && // movw
         Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   // movt
}

bool NativeMovConstReg::is_ldr_literal_at(address addr) {
  unsigned insn = as_uint(addr);
  return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == 0b010000011111;
}

bool NativeMovConstReg::is_at(address addr) {
  return NativeMovConstReg::is_movw_movt_at(addr) ||
    NativeMovConstReg::is_ldr_literal_at(addr);
}

//-------------------------------------------------------------------
// TODO review
address NativeMovRegMem::instruction_address() const {
  return addr();
}

int NativeMovRegMem::offset() const {
  address pc = addr();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch32::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(addr());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = addr();
  // FIXME seems not very robust
  MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
  ICache::invalidate_range(addr(), instruction_size);
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(addr());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (!is_jump()) {
    fatal("not a jump");
  }
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

address NativeJump::jump_destination() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    return address(NativeMovConstReg::from(addr())->data());
  }
  ShouldNotReachHere();
  return NULL;
}

void NativeJump::set_jump_destination(address dest) {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    NativeImmJump::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else {
    ShouldNotReachHere();
  }
}

address NativeJump::next_instruction_address() const {
  assert(is_jump(), "not a jump");
  if (NativeImmJump::is_at(addr())) {
    return NativeImmJump::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
    assert(NativeRegJump::is_at(after_move), "should be jump");
    return NativeRegJump::from(after_move)->next_instruction_address();
  }
  ShouldNotReachHere();
  return NULL;
}

bool NativeJump::is_at(address addr) {
  if (NativeImmJump::is_at(addr)) {
    return true;
  }
  if (NativeMovConstReg::is_at(addr)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(addr);
    address next_instr = nm->next_instruction_address();
    return NativeRegJump::is_at(next_instr) &&
      NativeRegJump::from(next_instr)->destination() == nm->destination();
  }
  return false;
}

NativeJump* NativeJump::from(address addr) {
  assert(NativeJump::is_at(addr), "");
  return (NativeJump*) addr;
}

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "AArch32 cannot replace a non-jump with a jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

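    // 0b11101010 (0xea) encodes an unconditional B: cond = AL (0b1110),
    // opcode 0b101, L = 0; the low 24 bits hold the signed word offset.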
    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

//-------------------------------------------------------------------

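// Matches the bx/blx register-branch family (0xe12fff1x / 0xe12fff3x,
// ignoring the condition field): bits 7..4 select the flavor, checked by
// NativeRegJump::is_at (0b0001 = bx) and NativeRegCall::is_at (0b0011 = blx).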
bool NativeBranchType::is_branch_type(uint32_t insn) {
  return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
    Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
}

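// Rewrites the 24-bit immediate of a b/bl-style instruction in place. The
// encodable range is +/-32 MB: a signed 24-bit word offset shifted left by 2.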
void NativeBranchType::patch_offset_to(address dest) {
  uint32_t insn = as_uint();
  const intptr_t off = (dest - (addr() + 8));
  assert((off & 3) == 0, "should be");
  assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1024,
      "new offset should fit in instruction");

  const unsigned off_mask = ((1U << 24) - 1);
  insn &= ~off_mask; // mask off offset part
  insn |= ((unsigned) off >> 2) & off_mask;

  set_uint(insn);
  ICache::invalidate_range(addr_at(0), instruction_size);
}

//-------------------------------------------------------------------

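// B (immediate): cond | 1010 | imm24, the same pc + 8 relative form as bl
// in NativeImmCall above.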
address NativeImmJump::destination() const {
  assert(is_imm_jump(), "not a jump");
  return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
}

void NativeImmJump::set_destination(address dest) {
  assert(is_imm_jump(), "");
  patch_offset_to(dest);
}

bool NativeImmJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return Instruction_aarch32::extract(insn, 27, 24) == 0b1010;
}

NativeImmJump* NativeImmJump::from(address addr) {
  assert(NativeImmJump::is_at(addr), "");
  return (NativeImmJump*) addr;
}

//-------------------------------------------------------------------

bool NativeRegJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
}

NativeRegJump* NativeRegJump::from(address addr) {
  assert(NativeRegJump::is_at(addr), "");
  return (NativeRegJump*) addr;
}

Register NativeRegJump::destination() const {
  assert(is_reg_jump(), "");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // A safepoint poll is implemented in two steps: materialize the polling
  // page address, then load from it
  //
  // movw(r9, polling_page & 0xffff);
  // movt(r9, polling_page >> 16);
  // ldr(r9, [r9, #0]);
  //
  // We can rely on this instruction sequence as long as we only have C1

  if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
    bool res = false;
    unsigned paddr = (unsigned)os::get_polling_page();

    unsigned addr_lo = paddr & 0xffff;
    unsigned addr_hi = paddr >> 16;

    Register scratch = rscratch1;

    res =        from(addr() - 0x8)->is_movw(scratch, addr_lo);
    res = res && from(addr() - 0x4)->is_movt(scratch, addr_hi);
    res = res && from(addr() - 0x0)->is_ldr(scratch, Address(scratch));

    return res;
  } else {
    assert(false, "not implemented");
    return false;
  }
}

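// movw/movt encode a 16-bit immediate split across the instruction: imm4 in
// bits 19..16 and imm12 in bits 11..0, with the destination register in bits
// 15..12 and the condition in bits 31..28. These are the fields checked below.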
bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

  return a1 && a2 && a3 && a4 && a5;
}

bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

  return a1 && a2 && a3 && a4 && a5;
}

bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
  assert(addr.get_mode() == Address::imm, "unimplemented");
  assert(addr.get_wb_mode() == Address::off, "unimplemented");
  assert(addr.index() == noreg, "unimplemented");
  assert(addr.offset() == 0, "unimplemented");

  bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; // P
  bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; // U
  bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; // W
  bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
  bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;

  bool a1 = b0 && b1 && b2 && b3 && b4; // address encoding

  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;
  bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;
  bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010;
  bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

  return a1 && a2 && a3 && a4 && a5 && a6;
}

bool NativeInstruction::is_movt() {
  return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
}

bool NativeInstruction::is_orr() {
  return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}

//-------------------------------------------------------------------

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.b(entry);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
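// The code buffer holds patching_copy_buff_len bytes of instructions followed
// by a "patching switch" immediate branch. The switch starts out branching to
// the instruction right after itself; patching then takes two single-word
// writes: redirect the switch to the code following the patched region, then
// replace the instruction at instr_addr with a branch into the code buffer.
// Each write is atomic, so a concurrent thread always sees consistent control
// flow.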
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  const address patching_switch_addr = code_buffer + patching_copy_buff_len;

  NativeImmJump* patching_switch = NativeImmJump::from(patching_switch_addr);
  assert(!NativeInstruction::from(instr_addr)->is_patched_already(), "should not be patched yet");
  assert(patching_switch->destination() == patching_switch_addr + NativeInstruction::arm_insn_sz,
         "switch should be a branch to the next instruction at this point");
  patching_switch->set_destination(instr_addr + patching_copy_buff_len);
  ICache::invalidate_word(patching_switch_addr);

  NativeImmJump* nj = NativeImmJump::from(instr_addr); // checks that it is a jump
  nj->set_destination(code_buffer);
  ICache::invalidate_word(instr_addr);

  assert(NativeInstruction::from(instr_addr)->is_patched_already(), "should be patched already");
}

bool NativeInstruction::is_patched_already() const {
  if (NativeImmJump::is_at(addr())) {
    address maybe_copy_buff = NativeImmJump::from(addr())->destination();
    address maybe_patching_switch = maybe_copy_buff + patching_copy_buff_len;
    if (NativeImmJump::is_at(maybe_patching_switch)) {
      return NativeImmJump::from(maybe_patching_switch)->destination() == addr() + patching_copy_buff_len;
    }
  }
  return false;
}