/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this.  There are only special
  // places where this can be called, but they are not automatically
  // verifiable by checking which locks are held.  The solution is true
  // atomic patching on x86, which is not yet implemented.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                p2i(instruction_address()), p2i(destination()));
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock).  Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
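//
// Sketched at the byte level (call rel32 is 0xE8 followed by a little-endian
// 32-bit displacement; the displacement bytes shown are illustrative):
//
//   before:  E8 d0 d1 d2 d3        the original call
//   step 1:  EB FE EB FE d3        first word -> two "jmp to self" loops
//   step 2:  EB FE EB FE d3'       last byte of the new call written
//   step 3:  E8 d0' d1' d2' d3'    first word -> the new call
//
// A thread that starts executing the instruction between the steps spins in
// the 2-byte self-jump (EB FE) instead of running a half-written call.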
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Build the dummy jmp bytes (two 2-byte jumps to self).
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for ( int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states.  Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case: The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case: The instruction prefix lies within a single cache line.
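    // The sequence below, sketched at the byte level (call rel32 is
    // E8 d0 d1 d2 d3; the displacement bytes shown are illustrative):
    //   step 1:  EB FE d1 d2 d3      first two bytes -> self-jump, written
    //                                with one atomic 16-bit store (this is
    //                                why the prefix alignment matters)
    //   step 2:  EB FE d1' d2' d3'   bytes 2-4 rewritten while other threads
    //                                spin in the self-jump
    //   step 3:  E8 d0' d1' d2' d3'  opcode and first displacement byte
    //                                restored with another 16-bit store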
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self

      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate.  Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible: One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
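  // (For reference: the 2-byte VEX prefix starts with 0xC5 and the 3-byte
  // form with 0xC4.  In 32-bit mode those byte values are the LDS/LES
  // opcodes, which is why the asserts below check that the next byte has
  // both top ModRM bits set, a pattern LDS/LES cannot produce.)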
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix ) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
       instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
       instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix ) {  // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}
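
// (Illustrative encodings, not emitted by this file: with a 32-bit
// displacement, "mov 0x12345678(%rax), %ecx" encodes as 8B 88 78 56 34 12,
// with the displacement immediately after the ModRM byte.  By contrast,
// "mov 0x12345678(%rsp), %ecx" encodes as 8B 8C 24 78 56 34 12: rm = 100
// forces an SIB byte (here 0x24), pushing the displacement one byte further
// out.  That extra byte is what offset() and set_offset() below account for.)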

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which puts the nnnn
  // field off by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    default:
      fatal ("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal ("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}
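
// (jmp rel32 encodes as 0xE9 followed by a 32-bit displacement measured from
// the end of the 5-byte instruction, which is where the "code_pos + 1 + 4"
// above comes from; NativeCall::insert uses the same arithmetic for call.)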

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use.  The patching in that instance must happen only when certain
  // alignment restrictions are true.  These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
// The problem: jmp <dest> is a 5-byte instruction, but writes are atomic only for 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry.
// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...).
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump instruction
  // already exists at the address.
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Build the dummy jmp bytes (two 2-byte jumps to self).
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);

}
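
// (patch_verified_entry uses the same self-jump spinlock sequence sketched
// above for NativeCall::replace_mt_safe, with jmp rel32 (0xE9 + disp32) as
// the final instruction written.)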

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock).  Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code: two 2-byte jumps to self.
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for ( int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset  = (op_code == 0x0F)  ? 2 : 1;
  int length  = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}