src/cpu/x86/vm/nativeInst_x86.cpp

Old version:

  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "nativeInst_x86.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/handles.hpp"
  31 #include "runtime/sharedRuntime.hpp"
  32 #include "runtime/stubRoutines.hpp"
  33 #include "utilities/ostream.hpp"
  34 #ifdef COMPILER1
  35 #include "c1/c1_Runtime1.hpp"
  36 #endif
  37 
  38 void NativeInstruction::wrote(int offset) {
  39   ICache::invalidate_word(addr_at(offset));
  40 }
  41 
  42 void NativeCall::verify() {
  43   // Make sure code pattern is actually a call imm32 instruction.
  44   int inst = ubyte_at(0);
  45   if (inst != instruction_code) {
  46     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
  47                                                         inst);
  48     fatal("not a call disp32");
  49   }
  50 }
  51 
  52 address NativeCall::destination() const {
  53   // Getting the destination of a call isn't safe because that call can
  54   // be getting patched while you're calling this.  There's only special
  55   // places where this can be called but not automatically verifiable by
  56   // checking which locks are held.  The solution is true atomic patching
  57   // on x86, nyi.
  58   return return_address() + displacement();
  59 }
  60 
  61 void NativeCall::print() {


 405   if ( (test_byte == instruction_prefix_wide ||
 406         test_byte == instruction_prefix_wide_extended) ) {
 407     test_byte = *(u_char*)(instruction_address() + 1);
 408   }
 409 #endif // _LP64
 410   if ( ! ((test_byte == lea_instruction_code)
 411           LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
 412     fatal ("not a lea reg, [reg+offs] instruction");
 413   }
 414 }
 415 
 416 
 417 void NativeLoadAddress::print() {
 418   tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
 419 }
 420 
 421 //--------------------------------------------------------------------------------
 422 
 423 void NativeJump::verify() {
 424   if (*(u_char*)instruction_address() != instruction_code) {
 425     fatal("not a jump instruction");
 426   }
 427 }
 428 
 429 
 430 void NativeJump::insert(address code_pos, address entry) {
 431   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
 432 #ifdef AMD64
 433   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
 434 #endif // AMD64
 435 
 436   *code_pos = instruction_code;
 437   *((int32_t*)(code_pos + 1)) = (int32_t)disp;
 438 
 439   ICache::invalidate_range(code_pos, instruction_size);
 440 }
 441 
 442 void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
 443   // Patching to not_entrant can happen while activations of the method are
 444   // in use. The patching in that instance must happen only when certain
 445   // alignment restrictions are true. These guarantees check those
 446   // conditions.


 497   patch[2] = 0xEB;
 498   patch[3] = 0xFE;
 499 
 500   // First patch dummy jmp in place
 501   *(int32_t*)verified_entry = *(int32_t *)patch;
 502 
 503   n_jump->wrote(0);
 504 
 505   // Patch 5th byte (from jump instruction)
 506   verified_entry[4] = code_buffer[4];
 507 
 508   n_jump->wrote(4);
 509 
 510   // Patch bytes 0-3 (from jump instruction)
 511   *(int32_t*)verified_entry = *(int32_t *)code_buffer;
 512   // Invalidate.  Opteron requires a flush after every write.
 513   n_jump->wrote(0);
 514 
 515 }
 516 
 517 void NativePopReg::insert(address code_pos, Register reg) {
 518   assert(reg->encoding() < 8, "no space for REX");
 519   assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
 520   *code_pos = (u_char)(instruction_code | reg->encoding());
 521   ICache::invalidate_range(code_pos, instruction_size);
 522 }
 523 
 524 
 525 void NativeIllegalInstruction::insert(address code_pos) {
 526   assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
 527   *(short *)code_pos = instruction_code;
 528   ICache::invalidate_range(code_pos, instruction_size);
 529 }
 530 
 531 void NativeGeneralJump::verify() {
 532   assert(((NativeInstruction *)this)->is_jump() ||
 533          ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
 534 }
 535 
 536 

New version:

  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "nativeInst_x86.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/handles.hpp"
  31 #include "runtime/sharedRuntime.hpp"
  32 #include "runtime/stubRoutines.hpp"
  33 #include "utilities/ostream.hpp"
  34 #ifdef COMPILER1
  35 #include "c1/c1_Runtime1.hpp"
  36 #endif
  37 
  38 void NativeInstruction::wrote(int offset) {
  39   ICache::invalidate_word(addr_at(offset));
  40 }
  41 
  42 void NativeLoadGot::report_and_fail() const {
  43   tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  44   fatal("not an indirect rip mov to rbx");
  45 }
  46 
  47 void NativeLoadGot::verify() const {
  48   if (has_rex) {
  49     int rex = ubyte_at(0);
  50     if (rex != rex_prefix) {
  51       report_and_fail();
  52     }
  53   }
  54 
  55   int inst = ubyte_at(rex_size);
  56   if (inst != instruction_code) {
  57     report_and_fail();
  58   }
  59   int modrm = ubyte_at(rex_size + 1);
  60   if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
  61     report_and_fail();
  62   }
  63 }
  64 
  65 intptr_t NativeLoadGot::data() const {
  66   return *(intptr_t *) got_address();
  67 }
  68 
  69 address NativePltCall::destination() const {
  70   NativeGotJump* jump = nativeGotJump_at(plt_jump());
  71   return jump->destination();
  72 }
  73 
  74 address NativePltCall::plt_entry() const {
  75   return return_address() + displacement();
  76 }
  77 
  78 address NativePltCall::plt_jump() const {
  79   address entry = plt_entry();
  80   // Virtual PLT code has move instruction first
  81   if (((NativeGotJump*)entry)->is_GotJump()) {
  82     return entry;
  83   } else {
  84     return nativeLoadGot_at(entry)->next_instruction_address();
  85   }
  86 }
  87 
  88 address NativePltCall::plt_load_got() const {
  89   address entry = plt_entry();
  90   if (!((NativeGotJump*)entry)->is_GotJump()) {
  91     // Virtual PLT code has move instruction first
  92     return entry;
  93   } else {
  94     // Static PLT code has move instruction second (from c2i stub)
  95     return nativeGotJump_at(entry)->next_instruction_address();
  96   }
  97 }
  98 
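
Taken together, plt_jump() and plt_load_got() above imply two stub shapes: a virtual-call PLT entry begins with the GOT load (the move) followed by the indirect jump, while a static-call entry begins with the indirect jump and keeps the load in the c2i stub behind it. As a rough standalone sketch of telling the two apart from the first opcode byte (a hypothetical helper, not part of this change; the real checks are is_GotJump() and NativeLoadGot::verify(), and the byte value below is the standard x86-64 encoding rather than a constant read from this file):

#include <cstdint>

// Illustration only: 0xFF starts "jmp qword ptr [rip+disp32]" (FF /4), the
// NativeGotJump pattern; the virtual-call entry instead starts with a
// REX-prefixed "mov reg, qword ptr [rip+disp32]" load (NativeLoadGot).
enum class PltEntryKind {
  StaticCall,   // GOT jump first, c2i stub (load + jump) after it
  VirtualCall   // GOT load first, then the jump through the GOT
};

static PltEntryKind classify_plt_entry(const uint8_t* entry) {
  return (entry[0] == 0xFF) ? PltEntryKind::StaticCall
                            : PltEntryKind::VirtualCall;
}
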
  99 address NativePltCall::plt_c2i_stub() const {
 100   address entry = plt_load_got();
 101   // This method should be called only for static calls, which have a C2I stub.
 102   NativeLoadGot* load = nativeLoadGot_at(entry);
 103   return entry;
 104 }
 105 
 106 address NativePltCall::plt_resolve_call() const {
 107   NativeGotJump* jump = nativeGotJump_at(plt_jump());
 108   address entry = jump->next_instruction_address();
 109   if (((NativeGotJump*)entry)->is_GotJump()) {
 110     return entry;
 111   } else {
 112     // c2i stub 2 instructions
 113     entry = nativeLoadGot_at(entry)->next_instruction_address();
 114     return nativeGotJump_at(entry)->next_instruction_address();
 115   }
 116 }
 117 
 118 void NativePltCall::reset_to_plt_resolve_call() {
 119   set_destination_mt_safe(plt_resolve_call());
 120 }
 121 
 122 void NativePltCall::set_destination_mt_safe(address dest) {
 123   // rewriting the value in the GOT, it should always be aligned
 124   NativeGotJump* jump = nativeGotJump_at(plt_jump());
 125   address* got = (address *) jump->got_address();
 126   *got = dest;
 127 }
 128 
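
The MT-safety here comes from the indirection: the call reaches its target through the GOT, so retargeting it is a single aligned, pointer-sized store to the GOT slot, which x86-64 performs atomically; no instruction bytes are rewritten. A minimal sketch of that idea outside HotSpot (hypothetical helper; std::atomic makes the atomicity assumption explicit, where the code above relies on the slot's natural alignment):

#include <atomic>

// Illustration only: retargeting a GOT-based call is one atomic pointer store.
// Threads that already loaded the old value keep going to the old destination;
// later calls see the new one -- there is never a torn or half-patched target.
static void retarget_got_slot(std::atomic<void*>& got_slot, void* new_dest) {
  got_slot.store(new_dest, std::memory_order_release);
}
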
 129 void NativePltCall::set_stub_to_clean() {
 130   NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
 131   NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
 132   method_loader->set_data(0);
 133   jump->set_jump_destination((address)-1);
 134 }
 135 
 136 void NativePltCall::verify() const {
 137   // Make sure code pattern is actually a call rip+off32 instruction.
 138   int inst = ubyte_at(0);
 139   if (inst != instruction_code) {
 140     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
 141                                                         inst);
 142     fatal("not a call rip+off32");
 143   }
 144 }
 145 
 146 address NativeGotJump::destination() const {
 147   address *got_entry = (address *) got_address();
 148   return *got_entry;
 149 }
 150 
 151 void NativeGotJump::verify() const {
 152   int inst = ubyte_at(0);
 153   if (inst != instruction_code) {
 154     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
 155                                                         inst);
 156     fatal("not an indirect rip jump");
 157   }
 158 }
 159 
 160 void NativeCall::verify() {
 161   // Make sure code pattern is actually a call imm32 instruction.
 162   int inst = ubyte_at(0);
 163   if (inst != instruction_code) {
 164     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
 165                                                         inst);
 166     fatal("not a call disp32");
 167   }
 168 }
 169 
 170 address NativeCall::destination() const {
 171   // Getting the destination of a call isn't safe because that call can
 172   // be getting patched while you're calling this.  There's only special
 173   // places where this can be called but not automatically verifiable by
 174   // checking which locks are held.  The solution is true atomic patching
 175   // on x86, nyi.
 176   return return_address() + displacement();
 177 }
 178 
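
For reference, the arithmetic above is ordinary x86 call rel32 decoding: the 32-bit displacement is measured from the address of the next instruction, i.e. the return address. A small standalone sketch, assuming a raw byte pointer instead of HotSpot's return_address()/displacement() accessors:

#include <cstdint>
#include <cstring>

// Illustration only: decode the target of an E8 call rel32 located at 'insn'.
static const uint8_t* call_rel32_target(const uint8_t* insn) {
  int32_t disp;
  std::memcpy(&disp, insn + 1, sizeof(disp));   // little-endian disp32 after the opcode
  const uint8_t* return_address = insn + 5;     // 1 opcode byte + 4 displacement bytes
  return return_address + disp;                 // return_address() + displacement()
}
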
 179 void NativeCall::print() {


 523   if ( (test_byte == instruction_prefix_wide ||
 524         test_byte == instruction_prefix_wide_extended) ) {
 525     test_byte = *(u_char*)(instruction_address() + 1);
 526   }
 527 #endif // _LP64
 528   if ( ! ((test_byte == lea_instruction_code)
 529           LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
 530     fatal ("not a lea reg, [reg+offs] instruction");
 531   }
 532 }
 533 
 534 
 535 void NativeLoadAddress::print() {
 536   tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
 537 }
 538 
 539 //--------------------------------------------------------------------------------
 540 
 541 void NativeJump::verify() {
 542   if (*(u_char*)instruction_address() != instruction_code) {
 543     // far jump
 544     NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
 545     NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
 546     if (!jmp->is_jump_reg()) {
 547       fatal("not a jump instruction");
 548     }
 549   }
 550 }
 551 
 552 
 553 void NativeJump::insert(address code_pos, address entry) {
 554   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
 555 #ifdef AMD64
 556   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
 557 #endif // AMD64
 558 
 559   *code_pos = instruction_code;
 560   *((int32_t*)(code_pos + 1)) = (int32_t)disp;
 561 
 562   ICache::invalidate_range(code_pos, instruction_size);
 563 }
 564 
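
The 1 + 4 above is the length of the emitted jmp rel32 (one opcode byte plus a four-byte displacement), so the displacement is again relative to the end of the instruction, and on 64-bit the guarantee checks that the target lies within a signed 32-bit range. A standalone sketch of the same encoding (hypothetical helper; no ICache handling, and a bool result in place of guarantee()):

#include <cstdint>
#include <cstring>

// Illustration only: emit "E9 rel32" at code_pos jumping to entry.
static bool emit_jmp_rel32(uint8_t* code_pos, const uint8_t* entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);  // from end of the 5-byte insn
  if (disp != (intptr_t)(int32_t)disp) return false;               // must fit in 32 bits
  code_pos[0] = 0xE9;                                              // jmp rel32 opcode
  int32_t d32 = (int32_t)disp;
  std::memcpy(code_pos + 1, &d32, sizeof(d32));                    // little-endian displacement
  return true;
}
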
 565 void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
 566   // Patching to not_entrant can happen while activations of the method are
 567   // in use. The patching in that instance must happen only when certain
 568   // alignment restrictions are true. These guarantees check those
 569   // conditions.


 620   patch[2] = 0xEB;
 621   patch[3] = 0xFE;
 622 
 623   // First patch dummy jmp in place
 624   *(int32_t*)verified_entry = *(int32_t *)patch;
 625 
 626   n_jump->wrote(0);
 627 
 628   // Patch 5th byte (from jump instruction)
 629   verified_entry[4] = code_buffer[4];
 630 
 631   n_jump->wrote(4);
 632 
 633   // Patch bytes 0-3 (from jump instruction)
 634   *(int32_t*)verified_entry = *(int32_t *)code_buffer;
 635   // Invalidate.  Opteron requires a flush after every write.
 636   n_jump->wrote(0);
 637 
 638 }
 639 
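
The ordering above is what makes the five-byte patch tolerable under concurrent execution: the first four-byte store fills the patched word with the dummy bytes built in patch[] (the visible part installs 0xEB 0xFE, a jmp-to-self; bytes 0-1 of patch[] come from the elided lines above), so a thread arriving mid-patch parks in a tight loop instead of running torn bytes; the fifth byte of the real jump is then written; and a final four-byte store publishes the opcode and remaining displacement bytes. A simplified sketch of the same three-step order on a plain buffer (hypothetical helper; it assumes the alignment checked by check_verified_entry_alignment() and omits the wrote()/ICache flushes):

#include <cstdint>
#include <cstring>

// Illustration only: the three-step order used above, on a plain buffer.
// Assumes 'entry' is 4-byte aligned so the int32_t stores are single,
// untorn writes.
static void patch_entry_with_jmp(uint8_t* entry, const uint8_t jmp_bytes[5]) {
  // Step 1: publish dummy filler in bytes 0-3.  0xEB 0xFE is "jmp -2", a jump
  // to self, so an arriving thread parks instead of executing torn code.
  const uint8_t filler[4] = { 0xEB, 0xFE, 0xEB, 0xFE };   // assumption: mirrors patch[]
  int32_t word;
  std::memcpy(&word, filler, 4);
  *(int32_t*)entry = word;

  // Step 2: byte 4 (the last displacement byte of the real jump) can now be
  // written; nothing executes past the self-loop to reach it.
  entry[4] = jmp_bytes[4];

  // Step 3: one aligned 4-byte store drops in the opcode plus the first three
  // displacement bytes; parked threads re-fetch and take the completed jump.
  std::memcpy(&word, jmp_bytes, 4);
  *(int32_t*)entry = word;
}
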
 640 address NativeFarJump::jump_destination() const          {
 641   NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
 642   return (address)mov->data();
 643 }
 644 
 645 void NativeFarJump::verify() {
 646   if (is_far_jump()) {
 647     NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
 648     NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
 649     if (jmp->is_jump_reg()) return;
 650   }
 651   fatal("not a jump instruction");
 652 }
 653 
 654 void NativePopReg::insert(address code_pos, Register reg) {
 655   assert(reg->encoding() < 8, "no space for REX");
 656   assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
 657   *code_pos = (u_char)(instruction_code | reg->encoding());
 658   ICache::invalidate_range(code_pos, instruction_size);
 659 }
 660 
 661 
 662 void NativeIllegalInstruction::insert(address code_pos) {
 663   assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
 664   *(short *)code_pos = instruction_code;
 665   ICache::invalidate_range(code_pos, instruction_size);
 666 }
 667 
 668 void NativeGeneralJump::verify() {
 669   assert(((NativeInstruction *)this)->is_jump() ||
 670          ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
 671 }
 672 
 673 

