1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "nativeInst_aarch32.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/handles.hpp"
  34 #include "runtime/sharedRuntime.hpp"
  35 #include "runtime/stubRoutines.hpp"
  36 #include "utilities/ostream.hpp"
  37 #ifdef COMPILER1
  38 #include "c1/c1_Runtime1.hpp"
  39 #endif
  40 
  41 NativeInstruction* NativeInstruction::from(address addr) {
  42   return (NativeInstruction*) addr;
  43 }
  44 
  45 //-------------------------------------------------------------------
  46 
  47 void NativeCall::verify() {
  48   if (!is_call()) {
  49     fatal("not a call");
  50   }
  51 }
  52 
  53 address NativeCall::destination() const {
  54   assert(is_call(), "not a call");
  55   if (NativeImmCall::is_at(addr())) {
  56     return NativeImmCall::from(addr())->destination();
  57   } else if (NativeMovConstReg::is_at(addr())) {
  58     return address(NativeMovConstReg::from(addr())->data());
  59   } else if (NativeTrampolineCall::is_at(addr())) {
  60     return NativeTrampolineCall::from(addr())->destination();
  61   }
  62   ShouldNotReachHere();
  63   return NULL;
  64 }
  65 
  66 void NativeCall::set_destination(address dest) {
  67   assert(is_call(), "not a call");
  68   if (NativeImmCall::is_at(addr())) {
  69     NativeImmCall::from(addr())->set_destination(dest);
  70   } else if (NativeMovConstReg::is_at(addr())) {
  71     NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  72   } else if (NativeTrampolineCall::is_at(addr())) {
  73     NativeTrampolineCall::from(addr())->set_destination(dest);
  74   } else {
  75     ShouldNotReachHere();
  76   }
  77 }
  78 
// Redirect this call while other threads may be executing or reading it.
// Only the trampoline form is patchable this way: its target is a single
// data word, so one aligned store switches readers atomically.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_call(), "not a call");

  // patching should be not only safe (i.e. this call could be executed by some thread),
  // but it also should be atomic (some other thread could call NativeCall::destination()
  // and see valid destination value)

  if (NativeImmCall::is_at(addr())) {
    // An immediate BL could be rewritten atomically, but its signed 24-bit
    // word offset (see NativeImmCall::destination) cannot reach an
    // arbitrary destination, so such requests must use a trampoline.
    assert(false, "could be patched mt_safe way, but should not be requested to. "
           "Known mt_safe requests have arbitrary destination offset. "
           "Use trampoline_call for this.");
    ShouldNotCallThis();
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  } else {
    // Note: the mov-const form is deliberately unsupported here — a
    // movw/movt pair cannot be repatched atomically.
    ShouldNotReachHere();
  }
}
  97 
// Raw call insertion is not implemented for aarch32.
void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}
 101 
 102 bool NativeCall::is_call_before(address return_address) {
 103   return is_at(return_address - NativeImmCall::instruction_size) ||
 104     is_at(return_address - NativeCall::instruction_size);
 105 }
 106 
 107 address NativeCall::next_instruction_address() const {
 108   assert(is_call(), "not a call");
 109   if (NativeImmCall::is_at(addr())) {
 110     return NativeImmCall::from(addr())->next_instruction_address();
 111   } else if (NativeMovConstReg::is_at(addr())) {
 112     NativeMovConstReg *nm = NativeMovConstReg::from(addr());
 113     address next_instr = nm->next_instruction_address();
 114     assert(NativeRegCall::is_at(next_instr), "should be");
 115     return NativeRegCall::from(next_instr)->next_instruction_address();
 116   } else if (NativeTrampolineCall::is_at(addr())) {
 117     return NativeTrampolineCall::from(addr())->next_instruction_address();
 118   } else {
 119     ShouldNotReachHere();
 120     return NULL;
 121   }
 122 }
 123 
 124 address NativeCall::return_address() const {
 125   return next_instruction_address();
 126 }
 127 
 128 bool NativeCall::is_at(address addr) {
 129   if (NativeImmCall::is_at(addr)) {
 130     return true;
 131   } else if (NativeMovConstReg::is_at(addr)) {
 132     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 133     address next_instr = nm->next_instruction_address();
 134     return NativeRegCall::is_at(next_instr) &&
 135       NativeRegCall::from(next_instr)->destination() == nm->destination();
 136   } else if (NativeTrampolineCall::is_at(addr)) {
 137     return true;
 138   }
 139   return false;
 140 }
 141 
 142 NativeCall* NativeCall::from(address addr) {
 143   assert(NativeCall::is_at(addr), "");
 144   return (NativeCall*) addr;
 145 }
 146 
 147 //-------------------------------------------------------------------
 148 
// The trampoline keeps its target as a plain data word 8 bytes into the
// sequence, right after the two instructions matched by is_at.
address NativeTrampolineCall::destination() const {
  assert(is_at(addr()), "not call");
  return (address) uint_at(8);
}
 153 
// Overwrite the data word at offset 8 that the trailing
// 'ldr pc, [pc, -4]' loads its target from.
void NativeTrampolineCall::set_destination(address dest) {
  assert(is_at(addr()), "not call");
  set_uint_at(8, (uintptr_t) dest);
}
 158 
// MT-safe variant: the destination is a single word of data (not an
// instruction), so the plain store in set_destination is already atomic
// with respect to concurrent readers.
void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not call");
  set_destination(dest);
  // FIXME invalidate data cache
}
 164 
// A trampoline call is exactly this two-instruction sequence; the
// 32-bit word at offset 8 (loaded by the second insn) holds the target.
bool NativeTrampolineCall::is_at(address addr) {
  return as_uint(addr    ) == 0xe28fe004    // add     lr, pc, #4
      && as_uint(addr + 4) == 0xe51ff004;   // ldr     pc, [pc, -4]
}
 169 
 170 NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
 171   assert(NativeTrampolineCall::is_at(addr), "");
 172   return (NativeTrampolineCall*) addr;
 173 }
 174 
 175 //-------------------------------------------------------------------
 176 
 177 address NativeImmCall::destination() const {
 178   assert(is_imm_call(), "not call");
 179   uint32_t insn = as_uint();
 180   intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
 181   address destination = addr() + 8 + (off << 2);
 182   return destination;
 183 }
 184 
 185 void NativeImmCall::set_destination(address dest) {
 186   assert(is_imm_call(), "not call");
 187   patch_offset_to(dest);
 188 }
 189 
// Bits 27..24 == 0b1011 select the ARM BL (branch-with-link, immediate)
// encoding, for any condition code in bits 31..28.
bool NativeImmCall::is_at(address addr) {
  return Instruction_aarch32::extract(as_uint(addr), 27, 24)  == 0b1011;
}
 193 
 194 NativeImmCall* NativeImmCall::from(address addr) {
 195   assert(NativeImmCall::is_at(addr), "");
 196   return (NativeImmCall*) addr;
 197 }
 198 
 199 //-------------------------------------------------------------------
 200 
// The register operand sits in the low nibble (bits 3..0) of the insn.
Register NativeRegCall::destination() const {
  assert(is_reg_call(), "not call");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}
 205 
// A register call is the register-branch family (see
// NativeBranchType::is_branch_type) with op field 0b0011 in bits 7..4,
// i.e. the BLX-register encoding.
bool NativeRegCall::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
}
 210 
 211 NativeRegCall* NativeRegCall::from(address addr) {
 212   assert(NativeRegCall::is_at(addr), "");
 213   return (NativeRegCall*) addr;
 214 }
 215 
 216 //-------------------------------------------------------------------
 217 
 218 void NativeMovConstReg::verify() {
 219   if (!is_mov_const_reg()) {
 220     fatal("not a call");
 221   }
 222 }
 223 
// Recover the constant currently encoded by this sequence by decoding
// the instruction(s) at addr().
intptr_t NativeMovConstReg::data() const {
  // FIXME seems not very roboust
  // das(uint64_t(addr()),2);
  return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
}
 229 
 230 void NativeMovConstReg::set_data(intptr_t x) {
 231   // FIXME seems not very roboust
 232   MacroAssembler::pd_patch_instruction(addr(), (address)x);
 233   ICache::invalidate_range(addr(), max_instruction_size);
 234 };
 235 
 236 void NativeMovConstReg::print() {
 237   tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
 238                 p2i(addr()), data());
 239 }
 240 
// Destination register of the constant load (bits 15..12). For the
// movw/movt pair both instructions must write the same register.
Register NativeMovConstReg::destination() const {
  Register d = (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
  assert(d == (Register) Instruction_aarch32::extract(as_uint(addr() + arm_insn_sz), 15, 12),
      "movw and movt should load same register");
  return d;
}
 247 
 248 NativeMovConstReg* NativeMovConstReg::from(address addr) {
 249   assert(NativeMovConstReg::is_at(addr), "");
 250   return (NativeMovConstReg*) addr;
 251 }
 252 
// True if 'addr' holds a movw immediately followed by a movt; the
// opcode field in bits 27..20 distinguishes the two encodings.
bool NativeMovConstReg::is_movw_movt_at(address addr) {
  // Hopefully this is almost always ok - not sure about if at end
  unsigned insn = as_uint(addr);
  unsigned insn2 = as_uint(addr + arm_insn_sz);
  return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && //mov
         Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   //movt
}
 260 
// True for an 'ldr rd, [pc, #imm]' literal load. The mask keeps the
// load/word/pc-base bits of the encoding and ignores the P, U and W
// bits, so both positive and negative pc-relative offsets match.
bool NativeMovConstReg::is_ldr_literal_at(address addr) {
  unsigned insn = as_uint(addr);
  return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == 0b010000011111;
}
 265 
 266 bool NativeMovConstReg::is_at(address addr) {
 267   return NativeMovConstReg::is_movw_movt_at(addr) ||
 268     NativeMovConstReg::is_ldr_literal_at(addr);
 269 }
 270 
 271 
 272 //-------------------------------------------------------------------
 273 // TODO review
 274 address NativeMovRegMem::instruction_address() const {
 275   return addr();
 276 }
 277 
// Read back the offset/constant encoded by this instruction.
int NativeMovRegMem::offset() const  {
  address pc = addr();
  unsigned insn = *(unsigned*)pc;
  // Literal-load class (bits 28..24 == 0b10000): the value lives at the
  // decoded target address rather than in the insn itself.
  if (Instruction_aarch32::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    // NOTE(review): 'addr' is a byte pointer, so '*addr' yields only a
    // single byte of the stored value — confirm this is intended and
    // not meant to be '*(int*)addr'.
    return *addr;
  } else {
    // Otherwise the decoded target address itself is the offset.
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(addr());
  }
}
 288 
// Patch the encoded offset/target to 'x' and flush the icache.
void NativeMovRegMem::set_offset(int x) {
  address pc = addr();
  // FIXME seems not very roboust
  MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
  ICache::invalidate_range(addr(), instruction_size);
}
 295 
// Debug-only sanity check: decode the target address, relying on
// target_addr_for_insn to reject unexpected encodings
// (NOTE(review): presumed — confirm against MacroAssembler).
void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(addr());
#endif
}
 301 
 302 //--------------------------------------------------------------------------------
 303 
 304 void NativeJump::verify() {
 305   if (!is_jump()) {
 306     fatal("not a call");
 307   }
 308 }
 309 
// No extra alignment constraints are enforced for verified entry
// points on aarch32; intentionally empty.
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}
 312 
 313 address NativeJump::jump_destination() const {
 314   assert(is_jump(), "not a call");
 315   if (NativeImmJump::is_at(addr())) {
 316     return NativeImmJump::from(addr())->destination();
 317   } else if (NativeMovConstReg::is_at(addr())) {
 318     return address(NativeMovConstReg::from(addr())->data());
 319   }
 320   ShouldNotReachHere();
 321   return NULL;
 322 }
 323 
 324 void NativeJump::set_jump_destination(address dest) {
 325   assert(is_jump(), "not a call");
 326   if (NativeImmJump::is_at(addr())) {
 327     NativeImmJump::from(addr())->set_destination(dest);
 328   } else if (NativeMovConstReg::is_at(addr())) {
 329     NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
 330   } else {
 331     ShouldNotReachHere();
 332   }
 333 }
 334 
 335 address NativeJump::next_instruction_address() const {
 336   assert(is_jump(), "not a call");
 337   if (NativeImmJump::is_at(addr())) {
 338     return NativeImmJump::from(addr())->next_instruction_address();
 339   } else if (NativeMovConstReg::is_at(addr())) {
 340     address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
 341     assert(NativeRegJump::is_at(after_move), "should be jump");
 342     return NativeRegJump::from(after_move)->next_instruction_address();
 343   }
 344   ShouldNotReachHere();
 345   return NULL;
 346 }
 347 
 348 bool NativeJump::is_at(address addr) {
 349   if (NativeImmJump::is_at(addr)) {
 350     return true;
 351   }
 352   if (NativeMovConstReg::is_at(addr)) {
 353     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 354     address next_instr = nm->next_instruction_address();
 355     return NativeRegJump::is_at(next_instr) &&
 356       NativeRegJump::from(next_instr)->destination() == nm->destination();
 357   }
 358   return false;
 359 }
 360 
 361 NativeJump* NativeJump::from(address addr) {
 362   assert(NativeJump::is_at(addr), "");
 363   return (NativeJump*) addr;
 364 }
 365 
 366 // MT-safe inserting of a jump over a jump or a nop (used by
 367 // nmethod::make_not_entrant_or_zombie)
 368 
// Atomically overwrite the verified entry point either with a direct
// branch to 'dest' (always the handle_wrong_method stub) or, when the
// stub is out of branch range, with an illegal instruction that raises
// SIGILL when executed.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch32 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    // Word offset relative to pc + 8 (ARM pc reads two insns ahead).
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

    // 0b11101010 = condition AL (1110) + B-immediate opcode; the low
    // 24 bits carry the signed word offset. A single aligned word
    // store keeps the patch atomic for concurrently executing threads.
    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}
 394 
 395 //-------------------------------------------------------------------
 396 
// Matches the register-branch family: opcode 0b00010010 in bits 27..20
// with the all-ones should-be-one field in bits 19..8 (BX/BLX-style
// encodings; bits 7..4 select the particular member, see callers).
bool NativeBranchType::is_branch_type(uint32_t insn) {
  return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
    Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
}
 401 
 402 void NativeBranchType::patch_offset_to(address dest) {
 403   uint32_t insn = as_uint();
 404   const intptr_t off = (dest - (addr() + 8));
 405   assert((off & 3) == 0, "should be");
 406   assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1042,
 407       "new offset should fit in instruction");
 408 
 409   const unsigned off_mask = ((1U << 24) - 1);
 410   insn &= ~off_mask; // mask off offset part
 411   insn |= ((unsigned) off >> 2) & off_mask;
 412 
 413   set_uint(insn);
 414   ICache::invalidate_range(addr_at(0), instruction_size);
 415 }
 416 
 417 //-------------------------------------------------------------------
 418 
 419 address NativeImmJump::destination() const {
 420   assert(is_imm_jump(), "not jump");
 421   return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
 422 }
 423 
 424 void NativeImmJump::set_destination(address addr) {
 425   assert(is_imm_jump(), "");
 426   patch_offset_to(addr);
 427 }
 428 
// Bits 27..24 == 0b1010 select the ARM B (branch, immediate) encoding,
// for any condition code in bits 31..28.
bool NativeImmJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return Instruction_aarch32::extract(insn, 27, 24)  == 0b1010;
}
 433 
 434 NativeImmJump* NativeImmJump::from(address addr) {
 435   assert(NativeImmJump::is_at(addr), "");
 436   return (NativeImmJump*) addr;
 437 }
 438 
 439 //-------------------------------------------------------------------
 440 
// A register jump is the register-branch family (see is_branch_type)
// with op field 0b0001 in bits 7..4, i.e. the BX-register encoding.
bool NativeRegJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
}
 445 
 446 NativeRegJump* NativeRegJump::from(address addr) {
 447   assert(NativeRegJump::is_at(addr), "");
 448   return (NativeRegJump*) addr;
 449 }
 450 
 451 Register NativeRegJump::destination() const {
 452   assert(is_reg_jump(), "");
 453   return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
 454 }
 455 
 456 //-------------------------------------------------------------------
 457 
 458 bool NativeInstruction::is_safepoint_poll() {
 459   // a safepoint_poll is implemented in two steps as
 460   //
 461   // movw(r9, polling_page & 0xffff);
 462   // movt(r9, polling_page >> 16);
 463   // ldr(r9, [r9, #0]);
 464   //
 465   // We can rely on this instructions order until we have only C1
 466 
 467     if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2))  {
 468       bool res = false;
 469       unsigned paddr = (unsigned)os::get_polling_page();
 470 
 471       unsigned addr_lo = paddr & 0xffff;
 472       unsigned addr_hi = paddr >> 16;
 473 
 474       Register scratch = rscratch1;
 475 
 476       res =        from(addr() - 0x8)->is_movw(scratch, addr_lo);
 477       res = res && from(addr() - 0x4)->is_movt(scratch, addr_hi);
 478       res = res && from(addr() - 0x0)->is_ldr(scratch, Address(scratch));
 479 
 480       return res;
 481   } else {
 482     assert(false, "not implemented");
 483     return false;
 484   }
 485 }
 486 
// True if this insn is 'movt dst, #imm' under condition 'cond':
// opcode in bits 27..20, Rd in 15..12, the 16-bit immediate split
// across 19..16 (high nibble) and 11..0, condition in 31..28.
bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

  return a1 && a2 && a3 && a4 && a5;
}
 496 
// True if this insn is 'movw dst, #imm' under condition 'cond'; same
// field layout as is_movt, only the opcode in bits 27..20 differs.
bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

  return a1 && a2 && a3 && a4 && a5;
}
 506 
// True if this insn is 'ldr dst, [base]' (zero immediate offset, no
// writeback) under condition 'cond'. Only this one addressing mode is
// recognized; the asserts reject every other Address shape.
bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
    assert(addr.get_mode() == Address::imm, "unimplemented");
    assert(addr.get_wb_mode() == Address::off, "unimplemented");
    assert(addr.index() == noreg, "unimplemented");
    assert(addr.offset() == 0, "unimplemented");

    bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; //P
    bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; //U
    bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; //W
    bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
    bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;  // imm12 == 0

    bool a1 = b0 && b1 && b2 && b3 && b4; //Address encoding

    bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
    bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;     // L: load
    bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;     // B: word, not byte
    bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010; // load/store imm class
    bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == cond;

    return a1 && a2 && a3 && a4 && a5 && a6;
}
 529 
 530 
// True for any 'movt' (opcode bits 27..20), regardless of operands
// or condition code.
bool NativeInstruction::is_movt() {
  return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
}
 534 
// True for an ORR-immediate data-processing encoding (bits 27..21);
// the register-operand ORR form does not match this pattern.
bool NativeInstruction::is_orr() {
  return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
}
 538 
// The fixed undefined-instruction pattern planted by
// NativeIllegalInstruction::insert to mark not-entrant/zombie methods.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}
 542 
// Plant the SIGILL-raising pattern recognized by
// is_sigill_zombie_not_entrant. NOTE(review): no icache invalidation
// here — callers (e.g. patch_verified_entry) appear responsible for it;
// confirm for any new caller.
void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}
 546 
 547 //-------------------------------------------------------------------
 548 
// Nothing to check for a general jump on aarch32; intentionally empty.
void NativeGeneralJump::verify() {  }
 550 
 551 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
 552   NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;
 553 
 554   CodeBuffer cb(code_pos, instruction_size);
 555   MacroAssembler a(&cb);
 556 
 557   a.b(entry);
 558 
 559   ICache::invalidate_range(code_pos, instruction_size);
 560 }
 561 
 562 // MT-safe patching of a long jump instruction.
// Replace the code at 'instr_addr' with the copy staged in
// 'code_buffer': first make the staged copy's trailing branch return to
// the tail of the original site, then atomically divert the original
// site's leading branch into the buffer.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  // FIXME NativeCall from patching_epilog nops filling
  const int bytes_to_copy = NativeCall::instruction_size;
  const address patching_switch_addr = code_buffer + bytes_to_copy;
  NativeImmJump* patching_switch = NativeImmJump::from(patching_switch_addr);
  assert(patching_switch->destination() == patching_switch_addr + NativeInstruction::arm_insn_sz,
         "switch should be branch to next instr at this point");
  // Aim the buffer's trailing branch back at the original code just
  // past the copied region.
  patching_switch->set_destination(instr_addr + bytes_to_copy);
  ICache::invalidate_word(patching_switch_addr);

  // Single-word branch patch: atomic with respect to executing threads.
  NativeImmJump* nj = NativeImmJump::from(instr_addr); // checking that it is a jump
  nj->set_destination(code_buffer);
  ICache::invalidate_word(instr_addr);

}