--- old/src/cpu/x86/vm/frame_x86.inline.hpp	2016-10-25 10:40:02.672773225 +0200
+++ new/src/cpu/x86/vm/frame_x86.inline.hpp	2016-10-25 10:40:02.625773194 +0200
@@ -40,7 +40,11 @@
   _deopt_state = unknown;
 }
 
-inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
+inline void frame::init(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
+    pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
@@ -56,13 +60,19 @@
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
-  init(sp, fp, pc);
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
+  init(thread, sp, fp, pc);
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
+    pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
@@ -79,14 +89,20 @@
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp) {
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = (address)(sp[-1]);
 
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(_pc)) {
+    _pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
   // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
   // unlucky the junk value could be to a zombied method and we'll die on the
@@ -108,6 +124,8 @@
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
 // Accessors
@@ -316,4 +334,44 @@
   *result_adr = obj;
 }
 
+inline address* frame::raw_sender_pc_addr() {
+  address* sender_pc;
+
+  if (is_interpreted_frame()) {
+    sender_pc = sender_pc_addr();
+    assert(interpreter_frame_sender_sp() > (intptr_t*) sender_pc, "sender_sp should be below return address");
+  } else {
+    assert(_cb != NULL, "code blob is required");
+    assert(is_compiled_frame() || is_native_frame() || is_stub_frame(), "unexpected frame type");
+
+    // frame owned by optimizing compiler
+    int frame_size = _cb->frame_size();
+    assert(frame_size > 0, "must have non-zero frame size");
+    intptr_t* sender_sp = unextended_sp() + frame_size;
+
+    // On Intel the return_address is always the word on the stack
+    sender_pc = (address*) sender_sp-1;
+  }
+  assert(CodeCache::contains(*sender_pc), "must be in code cache");
+
+  return sender_pc;
+}
+
+inline void frame::memento_mark(Thread* thread) {
+  address& original_return_address = thread->memento_original_return_address();
+  assert(original_return_address == NULL, "only 1 frame can be patched per thread");
+
+  address* sender_pc = raw_sender_pc_addr();
+  original_return_address = *sender_pc;
+  *sender_pc = SharedRuntime::get_memento_stack_trace_return_handler();
+}
+
+inline bool frame::is_memento_marked(Thread* thread) {
+  bool memento_marked = *raw_sender_pc_addr() == SharedRuntime::get_memento_stack_trace_return_handler();
+  if (memento_marked) {
+    assert(thread->memento_original_return_address() != NULL, "original return address must be set if frame is patched");
+  }
+  return memento_marked;
+}
+
 #endif // CPU_X86_VM_FRAME_X86_INLINE_HPP