src/cpu/x86/vm/nativeInst_x86.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File hotspot Cdiff src/cpu/x86/vm/nativeInst_x86.hpp

src/cpu/x86/vm/nativeInst_x86.hpp

Print this page

        

// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeFarJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
*** 61,70 **** --- 62,73 ---- inline bool is_call(); inline bool is_call_reg(); inline bool is_illegal(); inline bool is_return(); inline bool is_jump(); + inline bool is_jump_reg(); + inline bool is_far_jump(); inline bool is_cond_jump(); inline bool is_safepoint_poll(); inline bool is_mov_literal64(); protected:
*** 103,112 **** --- 106,156 ---- //inst->verify(); #endif return inst; } + class NativePltCall: public NativeInstruction { + public: + enum Intel_specific_constants { + instruction_code = 0xE8, + instruction_size = 5, + instruction_offset = 0, + displacement_offset = 1, + return_address_offset = 5 + }; + address instruction_address() const { return addr_at(instruction_offset); } + address next_instruction_address() const { return addr_at(return_address_offset); } + address displacement_address() const { return addr_at(displacement_offset); } + int displacement() const { return (jint) int_at(displacement_offset); } + address return_address() const { return addr_at(return_address_offset); } + address destination() const; + address plt_entry() const; + address plt_jump() const; + address plt_load_got() const; + address plt_resolve_call() const; + address plt_c2i_stub() const; + void set_stub_to_clean(); + + void reset_to_plt_resolve_call(); + void set_destination_mt_safe(address dest); + + void verify() const; + }; + + inline NativePltCall* nativePltCall_at(address address) { + NativePltCall* call = (NativePltCall*) address; + #ifdef ASSERT + call->verify(); + #endif + return call; + } + + inline NativePltCall* nativePltCall_before(address addr) { + address at = addr - NativePltCall::instruction_size; + return nativePltCall_at(at); + } + inline NativeCall* nativeCall_at(address address); // The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off // instructions (used to manipulate inline caches, primitive & dll calls, etc.). class NativeCall: public NativeInstruction {
*** 127,139 **** address displacement_address() const { return addr_at(displacement_offset); } address return_address() const { return addr_at(return_address_offset); } address destination() const; void set_destination(address dest) { #ifdef AMD64 ! assert((labs((intptr_t) dest - (intptr_t) return_address()) & ! 0xFFFFFFFF00000000) == 0, ! "must be 32bit offset"); #endif // AMD64 set_int_at(displacement_offset, dest - return_address()); } void set_destination_mt_safe(address dest); --- 171,182 ---- address displacement_address() const { return addr_at(displacement_offset); } address return_address() const { return addr_at(return_address_offset); } address destination() const; void set_destination(address dest) { #ifdef AMD64 ! intptr_t disp = dest - return_address(); ! guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset"); #endif // AMD64 set_int_at(displacement_offset, dest - return_address()); } void set_destination_mt_safe(address dest);
*** 156,165 **** --- 199,215 ---- static bool is_call_to(address instr, address target) { return nativeInstruction_at(instr)->is_call() && nativeCall_at(instr)->destination() == target; } + #if INCLUDE_AOT + static bool is_far_call(address instr, address target) { + intptr_t disp = target - (instr + sizeof(int32_t)); + return !Assembler::is_simm32(disp); + } + #endif + // MT-safe patching of a call instruction. static void insert(address code_pos, address entry); static void replace_mt_safe(address instr_addr, address code_buffer); };
*** 378,387 **** --- 428,482 ---- #endif return test; } }; + // destination is rbx or rax + // mov rbx, [rip + offset] + class NativeLoadGot: public NativeInstruction { + #ifdef AMD64 + static const bool has_rex = true; + static const int rex_size = 1; + #else + static const bool has_rex = false; + static const int rex_size = 0; + #endif + public: + enum Intel_specific_constants { + rex_prefix = 0x48, + instruction_code = 0x8b, + modrm_rbx_code = 0x1d, + modrm_rax_code = 0x05, + instruction_length = 6 + rex_size, + offset_offset = 2 + rex_size + }; + + address instruction_address() const { return addr_at(0); } + address rip_offset_address() const { return addr_at(offset_offset); } + int rip_offset() const { return int_at(offset_offset); } + address return_address() const { return addr_at(instruction_length); } + address got_address() const { return return_address() + rip_offset(); } + address next_instruction_address() const { return return_address(); } + intptr_t data() const; + void set_data(intptr_t data) { + intptr_t *addr = (intptr_t *) got_address(); + *addr = data; + } + + void verify() const; + private: + void report_and_fail() const; + }; + + inline NativeLoadGot* nativeLoadGot_at(address addr) { + NativeLoadGot* load = (NativeLoadGot*) addr; + #ifdef ASSERT + load->verify(); + #endif + return load; + } + // jump rel32off class NativeJump: public NativeInstruction { public: enum Intel_specific_constants {
*** 438,447 **** --- 533,565 ---- jump->verify(); #endif return jump; } + // far jump reg + class NativeFarJump: public NativeInstruction { + public: + address jump_destination() const; + + // Creation + inline friend NativeFarJump* nativeFarJump_at(address address); + + void verify(); + + // Unit testing stuff + static void test() {} + + }; + + inline NativeFarJump* nativeFarJump_at(address address) { + NativeFarJump* jump = (NativeFarJump*)(address); + #ifdef ASSERT + jump->verify(); + #endif + return jump; + } + // Handles all kinds of jump on Intel. Long/far, conditional/unconditional class NativeGeneralJump: public NativeInstruction { public: enum Intel_specific_constants { // Constants does not apply, since the lengths and offsets depends on the actual jump
*** 471,480 **** --- 589,628 ---- NativeGeneralJump* jump = (NativeGeneralJump*)(address); debug_only(jump->verify();) return jump; } + class NativeGotJump: public NativeInstruction { + public: + enum Intel_specific_constants { + instruction_code = 0xff, + instruction_offset = 0, + instruction_size = 6, + rip_offset = 2 + }; + + void verify() const; + address instruction_address() const { return addr_at(instruction_offset); } + address destination() const; + address return_address() const { return addr_at(instruction_size); } + int got_offset() const { return (jint) int_at(rip_offset); } + address got_address() const { return return_address() + got_offset(); } + address next_instruction_address() const { return addr_at(instruction_size); } + bool is_GotJump() const { return ubyte_at(0) == instruction_code; } + + void set_jump_destination(address dest) { + address *got_entry = (address *) got_address(); + *got_entry = dest; + } + }; + + inline NativeGotJump* nativeGotJump_at(address addr) { + NativeGotJump* jump = (NativeGotJump*)(addr); + debug_only(jump->verify()); + return jump; + } + class NativePopReg : public NativeInstruction { public: enum Intel_specific_constants { instruction_code = 0x58, instruction_size = 1,
*** 542,551 **** --- 690,705 ---- (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); } inline bool NativeInstruction::is_return() { return ubyte_at(0) == NativeReturn::instruction_code || ubyte_at(0) == NativeReturnX::instruction_code; } inline bool NativeInstruction::is_jump() { return ubyte_at(0) == NativeJump::instruction_code || ubyte_at(0) == 0xEB; /* short jump */ } + inline bool NativeInstruction::is_jump_reg() { + int pos = 0; + if (ubyte_at(0) == Assembler::REX_B) pos = 1; + return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0; + } + inline bool NativeInstruction::is_far_jump() { return is_mov_literal64(); } inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ || (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } inline bool NativeInstruction::is_safepoint_poll() { #ifdef AMD64 // Try decoding a near safepoint first:
src/cpu/x86/vm/nativeInst_x86.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File