src/cpu/x86/vm/nativeInst_x86.cpp
*** old/src/cpu/x86/vm/nativeInst_x86.cpp	Mon Oct 31 17:46:19 2016
--- new/src/cpu/x86/vm/nativeInst_x86.cpp	Mon Oct 31 17:46:18 2016

*** 37,46 ****
--- 37,164 ----
  void NativeInstruction::wrote(int offset) {
    ICache::invalidate_word(addr_at(offset));
  }

+ void NativeLoadGot::report_and_fail() const {
+   tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
+   fatal("not an indirect rip mov to rbx");
+ }
+
+ void NativeLoadGot::verify() const {
+   if (has_rex) {
+     int rex = ubyte_at(0);
+     if (rex != rex_prefix) {
+       report_and_fail();
+     }
+   }
+
+   int inst = ubyte_at(rex_size);
+   if (inst != instruction_code) {
+     report_and_fail();
+   }
+   int modrm = ubyte_at(rex_size + 1);
+   if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
+     report_and_fail();
+   }
+ }
+
+ intptr_t NativeLoadGot::data() const {
+   return *(intptr_t *) got_address();
+ }
+
+ address NativePltCall::destination() const {
+   NativeGotJump* jump = nativeGotJump_at(plt_jump());
+   return jump->destination();
+ }
+
+ address NativePltCall::plt_entry() const {
+   return return_address() + displacement();
+ }
+
+ address NativePltCall::plt_jump() const {
+   address entry = plt_entry();
+   // Virtual PLT code has the move instruction first.
+   if (((NativeGotJump*)entry)->is_GotJump()) {
+     return entry;
+   } else {
+     return nativeLoadGot_at(entry)->next_instruction_address();
+   }
+ }
+
+ address NativePltCall::plt_load_got() const {
+   address entry = plt_entry();
+   if (!((NativeGotJump*)entry)->is_GotJump()) {
+     // Virtual PLT code has the move instruction first.
+     return entry;
+   } else {
+     // Static PLT code has the move instruction second (from the c2i stub).
+     return nativeGotJump_at(entry)->next_instruction_address();
+   }
+ }
+
+ address NativePltCall::plt_c2i_stub() const {
+   address entry = plt_load_got();
+   // This method should be called only for static calls which have a C2I stub.
+   NativeLoadGot* load = nativeLoadGot_at(entry); // constructed for its debug-only verify()
+   return entry;
+ }
+
+ address NativePltCall::plt_resolve_call() const {
+   NativeGotJump* jump = nativeGotJump_at(plt_jump());
+   address entry = jump->next_instruction_address();
+   if (((NativeGotJump*)entry)->is_GotJump()) {
+     return entry;
+   } else {
+     // Skip the c2i stub: 2 instructions.
+     entry = nativeLoadGot_at(entry)->next_instruction_address();
+     return nativeGotJump_at(entry)->next_instruction_address();
+   }
+ }
+
+ void NativePltCall::reset_to_plt_resolve_call() {
+   set_destination_mt_safe(plt_resolve_call());
+ }
+
+ void NativePltCall::set_destination_mt_safe(address dest) {
+   // Rewriting the value in the GOT; it should always be aligned.
+   NativeGotJump* jump = nativeGotJump_at(plt_jump());
+   address* got = (address *) jump->got_address();
+   *got = dest;
+ }
+
+ void NativePltCall::set_stub_to_clean() {
+   NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
+   NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
+   method_loader->set_data(0);
+   jump->set_jump_destination((address)-1);
+ }
+
+ void NativePltCall::verify() const {
+   // Make sure the code pattern is actually a call rip+off32 instruction.
+   int inst = ubyte_at(0);
+   if (inst != instruction_code) {
+     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
+                   inst);
+     fatal("not a call rip+off32");
+   }
+ }
+
+ address NativeGotJump::destination() const {
+   address *got_entry = (address *) got_address();
+   return *got_entry;
+ }
+
+ void NativeGotJump::verify() const {
+   int inst = ubyte_at(0);
+   if (inst != instruction_code) {
+     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
+                   inst);
+     fatal("not an indirect rip jump");
+   }
+ }
+
  void NativeCall::verify() {
    // Make sure code pattern is actually a call imm32 instruction.
    int inst = ubyte_at(0);
    if (inst != instruction_code) {
      tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
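Background on the hunk above: a PLT entry combines a rip-relative mov that reads a GOT cell (NativeLoadGot) with an indirect jump through a GOT cell (NativeGotJump); per the comments in the code, virtual entries place the mov first, while static entries place the GOT jump first with the c2i stub behind it. The stand-alone sketch below (not HotSpot code; helper names are hypothetical) shows the two decoding facts the accessors rely on, assuming the standard x86-64 encodings: 0xFF /4 for jmp [rip+off32], and a 7-byte REX.W 0x8B mov r64, [rip+off32].

    #include <cstdint>
    #include <cstring>

    // got_address() semantics: a rip-relative operand resolves to the address
    // of the *next* instruction plus the trailing signed 32-bit displacement.
    static const uint8_t* rip_target(const uint8_t* insn, size_t insn_len) {
      int32_t disp;
      std::memcpy(&disp, insn + insn_len - 4, sizeof(disp)); // trailing disp32
      return insn + insn_len + disp;
    }

    // plt_jump() discrimination: a static PLT entry starts with the GOT jump
    // (opcode 0xFF), a virtual entry starts with the 7-byte GOT load.
    static const uint8_t* find_got_jump(const uint8_t* entry) {
      return (entry[0] == 0xFF) ? entry : entry + 7;
    }

    int main() {
      // Fake "jmp qword ptr [rip+0x10]": FF 25 10 00 00 00 (6 bytes).
      uint8_t code[32] = { 0xFF, 0x25, 0x10, 0x00, 0x00, 0x00 };
      const uint8_t* got_cell = rip_target(code, 6); // == code + 6 + 0x10
      return (got_cell == code + 0x16 && find_got_jump(code) == code) ? 0 : 1;
    }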
*** 420,431 ****
--- 538,554 ----
  //--------------------------------------------------------------------------------

  void NativeJump::verify() {
    if (*(u_char*)instruction_address() != instruction_code) {
+     // far jump
+     NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
+     NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
+     if (!jmp->is_jump_reg()) {
      fatal("not a jump instruction");
    }
+   }
  }

  void NativeJump::insert(address code_pos, address entry) {
    intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
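The fallback added to NativeJump::verify() above accepts the far-jump form: a mov of the 64-bit target into a register followed by a jump through that register. A minimal stand-alone recognizer for that byte pattern, assuming rax as the scratch register (the actual register choice is up to the code generator):

    #include <cstdint>

    // 48 B8 imm64 = movabs rax, imm64 (REX.W + B8+rd); FF E0 = jmp rax (FF /4).
    static bool is_far_jump_pattern(const uint8_t* p) {
      return p[0] == 0x48 && p[1] == 0xB8 && p[10] == 0xFF && p[11] == 0xE0;
    }

    int main() {
      uint8_t seq[12] = { 0x48, 0xB8, 0,0,0,0,0,0,0,0, 0xFF, 0xE0 };
      return is_far_jump_pattern(seq) ? 0 : 1;
    }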
*** 512,521 ****
--- 635,658 ----
    // Invalidate.  Opteron requires a flush after every write.
    n_jump->wrote(0);
  }

+ address NativeFarJump::jump_destination() const {
+   NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
+   return (address)mov->data();
+ }
+
+ void NativeFarJump::verify() {
+   if (is_far_jump()) {
+     NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
+     NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
+     if (jmp->is_jump_reg()) return;
+   }
+   fatal("not a jump instruction");
+ }
+
  void NativePopReg::insert(address code_pos, Register reg) {
    assert(reg->encoding() < 8, "no space for REX");
    assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
    *code_pos = (u_char)(instruction_code | reg->encoding());
    ICache::invalidate_range(code_pos, instruction_size);
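NativeFarJump::jump_destination() above delegates to NativeMovConstReg::data(), i.e. it reads the 64-bit immediate of the leading movabs. In raw bytes that is the eight bytes after the REX prefix and opcode, as this stand-alone sketch (not HotSpot code; same movabs-plus-jmp-rax assumption as before) illustrates:

    #include <cstdint>
    #include <cstring>

    // Read the imm64 of "movabs reg, imm64": skip REX.W + opcode, copy 8 bytes.
    static uint64_t far_jump_destination(const uint8_t* p) {
      uint64_t imm;
      std::memcpy(&imm, p + 2, sizeof(imm));
      return imm;
    }

    int main() {
      uint8_t seq[12] = { 0x48, 0xB8,                        // movabs rax, imm64
                          0xEF, 0xBE, 0xAD, 0xDE, 0, 0, 0, 0,
                          0xFF, 0xE0 };                      // jmp rax
      return far_jump_destination(seq) == 0xDEADBEEFull ? 0 : 1;
    }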
