/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}


void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this. There are only special
  // places where this can be called, but they are not automatically
  // verifiable by checking which locks are held. The solution is true
  // atomic patching on x86, not yet implemented.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos + 1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
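//
// Byte layout of the 5-byte call being patched (call rel32):
//
//   byte:    0      1..4
//           0xE8    disp32
//
// Patch sequence (each store is followed by an icache flush):
//   1. bytes 0-3 <- 0xEB 0xFE 0xEB 0xFE   (two jmp-to-self spin loops)
//   2. byte  4   <- last byte of the new call
//   3. bytes 0-3 <- first 4 bytes of the new call (single atomic write)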
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Build a temporary patch: two jmp-to-self spin loops
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(jint), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch the dummy jmp in place
  *(jint*)instr_addr = *(jint*)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch the last byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint*)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address() + 1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
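    // The steps below mirror replace_mt_safe: spin-lock the first two
    // bytes with a jmp-to-self, rewrite the trailing displacement bytes,
    // then atomically restore the call opcode and first displacement byte.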
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;  // jmp rel8
      patch_jump[1] = 0xFE;  // jmp to self

      assert(sizeof(patch_jump) == sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp) == instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate.  Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo &&  // 0x40
      instr_0 <= instruction_prefix_wide_hi) {  // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.
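  // They are consumed in the order the hardware decodes them: 0x66
  // (operand size), then 0xF3/0xF2 (SSE scalar prefixes), then a REX
  // prefix (0x40-0x4F), and finally the 0x0F escape byte.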

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++;  // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix ||  // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) {  // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo &&  // 0x40
      instr_0 <= instruction_prefix_wide_hi) {  // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix) {  // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*)instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;  // without this break the valid cases above would fall into the fatal below
  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte. Which will have the nnnn
  // field off by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte. Which will have the nnnn
  // field off by one byte
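  // (In ModRM encoding, rm == 0b100 with a memory-mode mod means a SIB
  // byte follows the ModRM byte, so the displacement starts one byte
  // later; hence the off-by-one adjustment below.)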
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    default:
      fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ((test_byte == instruction_prefix_wide ||
       test_byte == instruction_prefix_wide_extended)) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if (!((test_byte == lea_instruction_code)
        LP64_ONLY(|| (test_byte == mov64_instruction_code)))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", instruction_address(), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
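  // (Keeping the whole 5-byte jump inside one cache line means the
  // 4-byte atomic patch in patch_verified_entry cannot be observed as a
  // torn mix of old and new bytes by a concurrently fetching CPU; see
  // the 4827828 reference below.)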
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize - 1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
// The problem: jmp <dest> is a 5-byte instruction, but atomic writes can only be done on 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe under the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // The complete jump instruction (to be inserted) is built in code_buffer.
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump instruction exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Build a temporary patch: two jmp-to-self spin loops
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch the dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t*)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t*)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);
}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short*)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction*)this)->is_jump() ||
         ((NativeInstruction*)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t*)(code_pos + 1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code: two jmp-to-self spin loops
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch the dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t*)patch;
  n_jump->wrote(0);

  // Patch the last byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(int32_t*)instr_addr = *(int32_t*)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  // 0xE9 is jmp rel32; 0x0F is the first byte of a two-byte jcc rel32;
  // anything else is treated as a short (rel8) jump.
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + (is_rel32off ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}

bool NativeInstruction::is_dtrace_trap() {
  // first byte is an int3 breakpoint (0xCC)
  return (*(int32_t*)this & 0xff) == 0xcc;
}