src/cpu/x86/vm/frame_x86.inline.hpp

@@ -38,11 +38,18 @@
   _fp = NULL;
   _cb = NULL;
   _deopt_state = unknown;
 }
 
-inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
+inline void frame::init(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
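+  // If pc is the memento stack trace return handler stub, this frame was
+  // memento-marked; recover the original return address, which the marking
+  // code saved in the JavaThread.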
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
+    pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");

@@ -54,17 +61,25 @@
     _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
-  init(sp, fp, pc);
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
+  init(thread, sp, fp, pc);
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
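+  // As in init(): translate the memento return handler stub back to the
+  // original return address saved in the JavaThread.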
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
+    pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");

@@ -77,18 +92,26 @@
     assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp) {
+inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = (address)(sp[-1]);
 
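+  // The pc read from the stack may be the memento return handler stub;
+  // if so, substitute the original return address from the JavaThread.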
+  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(_pc)) {
+    _pc = ((JavaThread*) thread)->memento_original_return_address();
+  }
+
   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
   // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
   // unlucky the junk value could be to a zombied method and we'll die on the
   // find_blob call. This is also why we can have no asserts on the validity
   // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler

@@ -106,10 +129,12 @@
     _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
+
+  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
 }
 
 // Accessors
 
 inline bool frame::equal(frame other) const {

@@ -314,6 +339,53 @@
   guarantee(result_adr != NULL, "bad register save location");
 
   *result_adr = obj;
 }
 
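+// Returns the location of the stack slot that holds this frame's return
+// address (the sender's pc), for interpreted and compiled frames alike.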
+inline address* frame::raw_sender_pc_addr() {
+  address* sender_pc;
+
+  if (is_interpreted_frame()) {
+    sender_pc = sender_pc_addr();
+    assert(interpreter_frame_sender_sp() > (intptr_t*) sender_pc, "return address should be below sender_sp");
+  } else {
+    assert(_cb != NULL, "code blob is required");
+    assert(is_compiled_frame() || is_native_frame() || is_stub_frame(), "unexpected frame type");
+
+    // frame owned by optimizing compiler
+    int frame_size = _cb->frame_size();
+    assert(frame_size > 0, "must have positive frame size");
+    intptr_t* sender_sp = unextended_sp() + frame_size;
+
+    // On Intel the return address is always the word just below the sender's sp
+    sender_pc = (address*) (sender_sp - 1);
+  }
+  assert(CodeCache::contains(*sender_pc), "must be in code cache");
+
+  return sender_pc;
+}
+
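+// Memento-mark this frame: save its return address in the thread and
+// redirect the return address slot to the memento stack trace return
+// handler stub. Only one frame per thread may be marked at a time.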
+inline void frame::memento_mark(Thread* thread) {
+  address& original_return_address = thread->memento_original_return_address();
+  assert(original_return_address == NULL, "only 1 frame can be patched per thread");
+
+  address* sender_pc = raw_sender_pc_addr();
+  original_return_address = *sender_pc;
+  *sender_pc = SharedRuntime::get_memento_stack_trace_return_handler();
+}
+
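+// A frame is memento-marked if its return address slot currently points at
+// the memento stack trace return handler stub.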
+inline bool frame::is_memento_marked(Thread* thread) {
+  bool memento_marked = *raw_sender_pc_addr() == SharedRuntime::get_memento_stack_trace_return_handler();
+  if (memento_marked) {
+    assert(thread->memento_original_return_address() != NULL, "original return address must be set if frame is patched");
+  }
+  return memento_marked;
+}
+
 #endif // CPU_X86_VM_FRAME_X86_INLINE_HPP