/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP

#include "code/codeCache.hpp"

// Inline functions for Intel frames:
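//
// A rough sketch of the x86 frame layout that these accessors assume
// (word offsets relative to fp; frame_x86.hpp holds the authoritative
// offset constants):
//
//   fp + 2 : sender's sp        (sender_sp_offset)
//   fp + 1 : return address     (return_addr_offset)
//   fp + 0 : saved fp / link    (link_offset)
//   ...
//   sp     : top of stack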

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

inline void frame::init(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
  // If the return address was patched with the memento stack trace handler,
  // recover the original pc so the frame is built with the real return address.
  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
    pc = ((JavaThread*) thread)->memento_original_return_address();
  }

  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }

  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
}
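
// Note on the deopt handling in init(): when an nmethod is deoptimized, the
// return address into it is patched to point at its deopt handler and the
// original pc is stashed in the frame; nmethod::get_deopt_original_pc()
// recovers it, so frames built here always expose the logical (original) pc.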

inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp, address pc) {
  init(thread, sp, fp, pc);
}
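
// Illustrative only: a sketch of how such a frame is typically materialized
// from a thread's last-Java-frame anchor (the anchor accessors are the x86
// ones from javaFrameAnchor_x86.hpp; the call site is an assumption, not
// something prescribed by this header):
//
//   JavaFrameAnchor* anchor = jt->frame_anchor();
//   frame fr(jt, anchor->last_Java_sp(), anchor->last_Java_fp(),
//            anchor->last_Java_pc());
//   // init() resolves a memento-patched pc and the deopt state for us.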

inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(pc)) {
    pc = ((JavaThread*) thread)->memento_original_return_address();
  }

  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }

  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
}

inline frame::frame(Thread* thread, intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);

  if (thread != NULL && thread->is_Java_thread() && SharedRuntime::is_memento_stack_trace_return_handler(_pc)) {
    _pc = ((JavaThread*) thread)->memento_original_return_address();
  }

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
  // unlucky the junk value could point to a zombie method and we'd die on the
  // find_blob call. This is also why we can have no asserts on the validity
  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat dubious
  // value.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }

  assert(!SharedRuntime::is_memento_stack_trace_return_handler(_pc), "original return address not resolvable");
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return a unique id for this frame. The id must allow distinguishing both
// identity and the younger/older relationship between frames. NULL represents
// an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames based on frame ids.
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() < id;
}

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() > id;
}
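
// The '<' / '>' comparisons above rely on the x86 stack growing toward lower
// addresses: a younger (more recent) activation lives at a numerically
// smaller sp, so its id compares less than that of an older frame.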


inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void      frame::set_link(intptr_t* addr)  { *(intptr_t **)addr_at(link_offset) = addr; }


inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }

// Return address:

inline address* frame::sender_pc_addr()      const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// Return the address of the parameter at the given zero-origin index.
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at(native_frame_initial_param_offset + idx); }

#ifdef CC_INTERP

// The C++ interpreter keeps its BytecodeInterpreter state in the words
// immediately below the frame pointer.
inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at(-((int)sizeof(BytecodeInterpreter))/wordSize));
}

inline intptr_t* frame::sender_sp() const {
  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return addr_at(sender_sp_offset);
  }
}

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_locals);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_bcp);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_constants);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_method);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_mdx);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  assert(is_interpreted_frame(), "wrong frame type");
  return get_interpreterState()->_stack + 1;
}

#else /* asm interpreter */

inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcx_offset);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdx_offset);
}

// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For the top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}
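
// Note (based on how the template interpreter maintains this slot): the
// last_sp slot is NULL except while the interpreter is making a call, during
// which it records the current sp; so the NULL case above simply means the
// top of stack is at sp().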

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop*)(fp() + interpreter_frame_oop_temp_offset);
}

#endif /* CC_INTERP */

inline int frame::pd_oop_map_offset_adjustment() const {
  return 0;
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}

inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }

// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - local_index + (local_index < nof_args ? 1 : -1));
}

inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
}

inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
}

inline bool frame::volatile_across_calls(Register reg) {
  return true;
}

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop*)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  return (*result_adr);
}
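
// Illustrative only: a sketch of the preserve/update pattern used around
// safepoints that may move objects (the polling-page safepoint handler does
// something along these lines for compiled methods returning an oop; the
// names below are assumptions, not this file's API):
//
//   RegisterMap map(thread, true);
//   frame caller = stub_frame.sender(&map);
//   Handle result(thread, caller.saved_oop_result(&map));
//   ... // a GC at the safepoint may move the object
//   caller.set_saved_oop_result(&map, result());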

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop*)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

inline address* frame::raw_sender_pc_addr() {
  address* sender_pc;

  if (is_interpreted_frame()) {
    sender_pc = sender_pc_addr();
    assert(interpreter_frame_sender_sp() > (intptr_t*) sender_pc, "sender_sp should be below return address");
  } else {
    assert(_cb != NULL, "code blob is required");
    assert(is_compiled_frame() || is_native_frame() || is_stub_frame(), "unexpected frame type");

    // frame owned by optimizing compiler
    int frame_size = _cb->frame_size();
    assert(frame_size > 0, "must have non-zero frame size");
    intptr_t* sender_sp = unextended_sp() + frame_size;

    // On Intel the return address is always the word on the stack just below
    // the sender's sp.
    sender_pc = (address*) (sender_sp - 1);
  }
  assert(CodeCache::contains(*sender_pc), "must be in code cache");

  return sender_pc;
}

inline void frame::memento_mark(Thread* thread) {
  address& original_return_address = thread->memento_original_return_address();
  assert(original_return_address == NULL, "only 1 frame can be patched per thread");

  address* sender_pc = raw_sender_pc_addr();
  original_return_address = *sender_pc;
  *sender_pc = SharedRuntime::get_memento_stack_trace_return_handler();
}

inline bool frame::is_memento_marked(Thread* thread) {
  bool memento_marked = *raw_sender_pc_addr() == SharedRuntime::get_memento_stack_trace_return_handler();
  if (memento_marked) {
    assert(thread->memento_original_return_address() != NULL, "original return address must be set if frame is patched");
  }
  return memento_marked;
}
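
// Illustrative only: a sketch of the mark/check protocol, assuming a
// JavaThread* jt whose top frame is walkable (the call sites are
// assumptions, not prescribed by this header):
//
//   frame fr = jt->last_frame();
//   if (!fr.is_memento_marked(jt)) {
//     fr.memento_mark(jt);  // swap the return address for the handler
//   }
//   // When the frame returns, the handler runs, and the constructors above
//   // resolve the original pc via memento_original_return_address().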

#endif // CPU_X86_VM_FRAME_X86_INLINE_HPP