/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  if (has_rex) {
    int rex = ubyte_at(0);
    if (rex != rex_prefix) {
      report_and_fail();
    }
  }

  int inst = ubyte_at(rex_size);
  if (inst != instruction_code) {
    report_and_fail();
  }
  int modrm = ubyte_at(rex_size + 1);
  if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
    report_and_fail();
  }
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return jump->destination();
}

address NativePltCall::plt_entry() const {
  return return_address() + displacement();
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has the move instruction first.
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has the move instruction first.
    return entry;
  } else {
    // Static PLT code has the move instruction second (from the c2i stub).
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // The c2i stub is 2 instructions.
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; it should always be aligned.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  // Make sure code pattern is actually a call rip+off32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call rip+off32");
  }
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

void NativeGotJump::verify() const {
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not an indirect rip jump");
  }
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this. There are only special
  // places where this can be called, but those places are not automatically
  // verifiable by checking which locks are held. The solution is true atomic
  // patching on x86, nyi.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                p2i(instruction_address()), p2i(destination()));
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
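//
// As an illustration (call rel32 is the byte 0xE8 followed by a 32-bit
// displacement, and 0xEB 0xFE is a two-byte jmp-to-self), the call site goes
// through roughly these states:
//
//   E8 d0 d1 d2 d3     original call
//   EB FE EB FE d3     two self-looping jmps written in one 4-byte store
//   EB FE EB FE d3'    new last byte written
//   E8 d0'd1'd2'd3'    new call written back in one 4-byte store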
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // First patch dummy jmp in place
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(jint), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
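    // The steps below: atomically replace the two aligned prefix bytes with a
    // self-looping jmp rel8 (0xEB 0xFE), rewrite the last three displacement
    // bytes while any concurrent thread spins on that jmp, then atomically
    // restore the prefix bytes from the new encoding.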
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;  // jmp rel8
      patch_jump[1] = 0xFE;  // jmp to self

      assert(sizeof(patch_jump) == sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp) == instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate.  Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
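  // An AVX instruction starts with a two-byte (0xC5) or three-byte (0xC4)
  // VEX prefix, so the operand-bearing opcode begins 2 or 3 bytes in.  In
  // 32-bit mode those prefix bytes overlap the legacy LDS/LES opcodes, which
  // is why the asserts below check that the next byte has mod bits 11, a
  // form LDS/LES cannot take.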
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }


  if (instr_0 == instruction_extended_prefix) {  // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:

    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;  // do not fall through into the xor case below

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;

}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    default:
      fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offs] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    // far jump
    NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (!jmp->is_jump_reg()) {
      fatal("not a jump instruction");
    }
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize - 1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
// The problem: jmp <dest> is a 5-byte instruction, but atomic writes can only be done 4 bytes at a time.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe under the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry.
// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...).
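//
// As an illustration (jmp rel32 is the byte 0xE9 followed by a 32-bit
// displacement, and 0xEB 0xFE is a two-byte jmp-to-self), the verified entry
// goes through roughly these states:
//
//   xx xx xx xx xx ...   original verified entry point (>= 5 bytes)
//   EB FE EB FE xx ...   two self-looping jmps written in one 4-byte store
//   EB FE EB FE d3 ...   last byte of the new jmp written
//   E9 d0 d1 d2 d3 ...   new jmp written back in one 4-byte store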
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump instruction exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // First patch dummy jmp in place

  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);

}

address NativeFarJump::jump_destination() const {
  NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
  return (address)mov->data();
}

void NativeFarJump::verify() {
  if (is_far_jump()) {
    NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (jmp->is_jump_reg()) return;
  }
  fatal("not a jump instruction");
}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
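// The byte-level sequence is the same spinlock scheme sketched above for
// NativeCall::replace_mt_safe: overwrite bytes 0-3 with two self-looping
// jmp rel8 instructions (0xEB 0xFE), write the new 5th byte, then restore
// bytes 0-3 from the new jump in a single 4-byte store.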
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}



address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}