/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  if (has_rex) {
    int rex = ubyte_at(0);
    if (rex != rex_prefix) {
      report_and_fail();
    }
  }

  int inst = ubyte_at(rex_size);
  if (inst != instruction_code) {
    report_and_fail();
  }
  int modrm = ubyte_at(rex_size + 1);
  if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
    report_and_fail();
  }
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return jump->destination();
}

address NativePltCall::plt_entry() const {
  return return_address() + displacement();
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has move instruction first
    return entry;
  } else {
    // Static PLT code has move instruction second (from c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which have a C2I stub.
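  // The NativeLoadGot below exists only so that nativeLoadGot_at() can check
  // that 'entry' really points at the method loader of the c2i stub; the stub
  // address returned is 'entry' itself.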
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // c2i stub 2 instructions
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // rewriting the value in the GOT, it should always be aligned
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  // Make sure code pattern is actually a call rip+off32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call rip+off32");
  }
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

void NativeGotJump::verify() const {
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not an indirect rip jump");
  }
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this. There are only special
  // places where this can be called, but they are not automatically verifiable
  // by checking which locks are held. The solution is true atomic patching
  // on x86, nyi.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                p2i(instruction_address()), p2i(destination()));
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
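//
// Byte-level sketch (call rel32 is encoded as E8 xx xx xx xx):
//   step 1: bytes 0-3 <- EB FE EB FE   (two self-looping jmp rel8 instructions)
//   step 2: byte  4   <- byte 4 of the new call
//   step 3: bytes 0-3 <- bytes 0-3 of the new call (one aligned 32-bit store)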
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
  guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");

  // Build the dummy jmp patch
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate. Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked. No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(is_aligned, "destination must be aligned");

  // The destination lies within a single cache line.
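  // A single 32-bit store of the new displacement is therefore observed
  // atomically by threads concurrently executing this call.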
  set_destination(dest);
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }
  if (instr_0 == instruction_EVEX_prefix_4bytes) {
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    return 4;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.
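  // The checks below skip, in order: a 0x66 operand-size prefix, an SSE scalar
  // prefix (0xF3/0xF2), a REX prefix (0x40-0x4F), and the 0x0F escape byte.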

  if (instr_0 == instruction_operandsize_prefix ) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
       instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
       instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }


  if (instr_0 == instruction_extended_prefix ) {  // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:

    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_lea:            // 0x8d

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;

}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    case instruction_code_lea:       // 0x8d lea r, a
      break;

    default:
      fatal ("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal ("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    // far jump
    NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (!jmp->is_jump_reg()) {
      fatal("not a jump instruction");
    }
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
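  // linesize below is the cache line size assumed by patch_verified_entry();
  // the first 5 bytes of the verified entry must not straddle such a line.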
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::make_zombie)
// The problem: jmp <dest> is a 5-byte instruction, but an atomic write can only cover 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe under the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry
// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...)
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // The complete jump instruction (to be inserted) is built in code_buffer.
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Build the dummy jmp patch

  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate. Opteron requires a flush after every write.
  n_jump->wrote(0);

}

address NativeFarJump::jump_destination() const {
  NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
  return (address)mov->data();
}

void NativeFarJump::verify() {
  if (is_far_jump()) {
    NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (jmp->is_jump_reg()) return;
  }
  fatal("not a jump instruction");
}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert (instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump

  // Temporary code
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}



address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}