/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  if (has_rex) {
    int rex = ubyte_at(0);
    if (rex != rex_prefix) {
      report_and_fail();
    }
  }

  int inst = ubyte_at(rex_size);
  if (inst != instruction_code) {
    report_and_fail();
  }
  int modrm = ubyte_at(rex_size + 1);
  if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
    report_and_fail();
  }
}
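
// For reference, the pattern verified above is a RIP-relative GOT load; a
// representative encoding (assuming the usual constant values) is:
//   48 8B 1D <disp32>    mov rbx, [rip+disp32]   (REX.W, 0x8B, ModRM 0x1D)
// where the ModRM byte selects rbx (0x1D) or rax (0x05), and disp32 is
// relative to the end of the instruction, pointing at the GOT entry.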

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return jump->destination();
}

address NativePltCall::plt_entry() const {
  return return_address() + displacement();
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has a move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has a move instruction first
    return entry;
  } else {
    // Static PLT code has the move instruction second (from the c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry); // checks the pattern in debug builds
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // c2i stub 2 instructions
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; it should always be aligned,
  // so the pointer-sized store below is atomic.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  // Make sure code pattern is actually a call rip+off32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                                                        inst);
    fatal("not a call rip+off32");
  }
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

void NativeGotJump::verify() const {
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                                                        inst);
    fatal("not an indirect rip jump");
  }
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                                                        inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this.  There are only special
  // places where this can be called, but they are not automatically
  // verifiable by checking which locks are held.  The solution is true
  // atomic patching on x86, not yet implemented.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                p2i(instruction_address()), p2i(destination()));
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}
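
// Layout of the inserted instruction (5 bytes), for illustration:
//   E8 <disp32>    call rel32
// disp32 is relative to the end of the instruction (code_pos + 5), which is
// why the displacement above subtracts (code_pos + 1 + 4).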

// MT-safe patching of a call instruction.
// First patches the first word of the instruction with two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
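//
// Byte-level sketch of the sequence (hypothetical old/new displacements):
//   before:   E8 11 22 33 44    call rel32 (old)
//   step 1:   EB FE EB FE 44    first word is now two self-looping jmps
//   step 2:   EB FE EB FE 88    last byte of the new call written
//   step 3:   E8 55 66 77 88    first word swapped to the new call
// A thread that reaches the site between steps spins in "jmp to self" until
// step 3 publishes the complete new instruction.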
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Temporary code: a pair of self-looping short jmps
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch 5th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}


// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states.  Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
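//
// Illustration (assuming a 64-byte cache line): with the call opcode at
// offset 0x3e of a line, the disp32 at 0x3f..0x42 straddles the line
// boundary and cannot be rewritten atomically, but the two bytes at
// 0x3e..0x3f can, so the opcode is temporarily replaced with the
// self-looping "EB FE" while the displacement bytes are rewritten.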
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self

      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate.  Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}
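
// Representative encodings of the pattern checked above (x86_64):
//   48 B8 <imm64>    movabs rax, imm64   (REX.W,  opcode 0xB8 + rd)
//   49 BB <imm64>    movabs r11, imm64   (REX.WB; register_mask selects rd)
// Total length is 10 bytes: REX prefix, opcode byte, 8-byte immediate.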


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }
  if (instr_0 == instruction_EVEX_prefix_4bytes) {
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    return 4;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix) {  // 0x0f
    off++;
  }

  return off;
}
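
// Worked example of the scan above (byte values taken from the constants
// annotated in the comments):
//   F2 0F 10 83 <disp32>    movsd xmm0, [rbx+disp32]
// instr_0 == 0xf2 (SD prefix)       -> off = 1
// instr_0 == 0x0f (extended prefix) -> off = 2
// so instruction_start() returns 2, the offset of the 0x10 opcode.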

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_lea:            // 0x8d

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;  // was missing: without it, valid loads/stores fell through to fatal()

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is the encoding
  // that mandates an SIB byte, which shifts the nnnn field by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is the encoding
  // that mandates an SIB byte, which shifts the nnnn field by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}
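
// SIB illustration (hypothetical operands): without an index register,
//   8B 83 <disp32>       mov eax, [rbx+disp32]    disp32 at opcode + 2
// but rsp/r12 as a base register forces a SIB byte,
//   8B 84 24 <disp32>    mov eax, [rsp+disp32]    disp32 at opcode + 3
// hence the extra off++ above when (mod_rm & 7) == 0x4.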

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    case instruction_code_lea:       // 0x8d lea r, a
      break;

    default:
      fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ((test_byte == instruction_prefix_wide ||
       test_byte == instruction_prefix_wide_extended)) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if (!((test_byte == lea_instruction_code)
        LP64_ONLY(|| (test_byte == mov64_instruction_code)))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    // far jump
    NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (!jmp->is_jump_reg()) {
      fatal("not a jump instruction");
    }
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize - 1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}
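
// Example (32-bit, linesize 32): verified_entry == 0x08041c is wordSize
// aligned, but 0x1c/32 == 0 while (0x1c+4)/32 == 1, so the 5 patch bytes
// would straddle two cache lines and the second guarantee fails; on 64-bit,
// any wordSize-aligned address keeps bytes 0..4 within one 64-byte line.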


// MT-safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jmp <dest> is a 5-byte instruction. Atomic writes can only be done on 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry.
// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...).
//
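// Byte-level sketch (hypothetical displacement bytes):
//   step 1:   EB FE EB FE ??    first 4 bytes become two self-looping jmps
//   step 2:   EB FE EB FE dd    5th byte of the new "E9 aa bb cc dd" written
//   step 3:   E9 aa bb cc dd    jmp rel32 published by a single 4-byte write
//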
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Temporary code: a pair of self-looping short jmps
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);
}

address NativeFarJump::jump_destination() const {
  NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
  return (address)mov->data();
}

void NativeFarJump::verify() {
  if (is_far_jump()) {
    NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (jmp->is_jump_reg()) return;
  }
  fatal("not a jump instruction");
}
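
// A far jump as verified above is a two-instruction sequence, e.g.:
//   48 B8 <imm64>    movabs rax, <dest>
//   FF E0            jmp rax     (FF /4, ModRM 0xE0)
// The scratch register is whatever the emitter chose; rax is illustrative.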

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}
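
// The emitted instruction (5 bytes), for illustration:
//   E9 <disp32>    jmp rel32, disp32 relative to code_pos + 5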


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction with two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code: a pair of self-looping short jmps
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch 5th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(int32_t*)instr_addr = *(int32_t *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}


address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int  offset  = (op_code == 0x0F)  ? 2 : 1;
  int  length  = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}
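
// Decoding examples for the three forms handled above:
//   E9 <rel32>       jmp rel32:          dest = addr + 5 + rel32
//   0F 84 <rel32>    je rel32 (0F 8x):   dest = addr + 6 + rel32
//   EB <rel8>        jmp rel8:           dest = addr + 2 + rel8 (signed)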