/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VFRAME_HPP
#define SHARE_VM_RUNTIME_VFRAME_HPP

#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "code/location.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stackValueCollection.hpp"
#include "utilities/growableArray.hpp"

// vframes are virtual stack frames representing source level activations.
// A single frame may hold several source level activations in the case of
// optimized code. The debugging information stored with the optimized code
// enables us to unfold a frame as a stack of vframes.
// A cVFrame represents an activation of a non-java method.
// The vframe inheritance hierarchy:
// - vframe
//   - javaVFrame
//     - interpretedVFrame
//     - compiledVFrame          ; (used for both compiled Java methods and native stubs)
//   - externalVFrame
//     - entryVFrame             ; special frame created when calling Java from C

// - BasicLock

// Base class of all virtual stack frames. Wraps a raw frame plus the
// RegisterMap needed to decode it; allocated in a ResourceArea (ResourceObj),
// so vframes must not outlive the enclosing ResourceMark.
class vframe: public ResourceObj {
 protected:
  frame        _fr;      // Raw frame behind the virtual frame.
  RegisterMap  _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
  JavaThread*  _thread;  // The thread owning the raw frame.

  vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
  vframe(const frame* fr, JavaThread* thread);
 public:
  // Factory method for creating vframes; picks the concrete subclass
  // appropriate for the kind of raw frame passed in.
  static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread);

  // Accessors
  frame              fr()           const { return _fr; }
  CodeBlob*          cb()           const { return _fr.cb(); }
  // Returns the frame's code blob as an nmethod; asserts it really is one.
  nmethod*           nm()           const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // ???? Does this need to be a copy?
  frame*             frame_pointer()       { return &_fr; }
  const RegisterMap* register_map()  const { return &_reg_map; }
  JavaThread*        thread()        const { return _thread; }

  // Returns the sender vframe
  virtual vframe* sender() const;

  // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
  javaVFrame *java_sender() const;

  // Answers whether this is the top vframe in the frame, i.e., if the sender vframe
  // is in the caller frame
  virtual bool is_top() const { return true; }

  // Returns top vframe within same frame (see is_top())
  virtual vframe* top() const;

  // Type testing operations
  virtual bool is_entry_frame()       const { return false; }
  virtual bool is_java_frame()        const { return false; }
  virtual bool is_interpreted_frame() const { return false; }
  virtual bool is_compiled_frame()    const { return false; }

#ifndef PRODUCT
  // printing operations
  virtual void print_value() const;
  virtual void print();
#endif
};


// Abstract vframe for a Java-level activation: exposes the JVM state
// (method, bci, locals, expression stack, monitors) of one source-level call.
class javaVFrame: public vframe {
 public:
  // JVM state
  virtual Method*                      method()      const = 0;
  virtual int                          bci()         const = 0;
  virtual StackValueCollection*        locals()      const = 0;
  virtual StackValueCollection*        expressions() const = 0;
  // the order returned by monitors() is from oldest -> youngest (see bug 4418568)
  virtual GrowableArray<MonitorInfo*>* monitors()    const = 0;

  // Debugging support via JVMTI.
  // NOTE that this is not guaranteed to give correct results for compiled vframes.
  // Deoptimize first if necessary.
  virtual void set_locals(StackValueCollection* values) const = 0;

  // Test operation
  bool is_java_frame() const { return true; }

 protected:
  javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
  javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}

 public:
  // casting (checked downcast; NULL passes through)
  static javaVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_java_frame(), "must be java frame");
    return (javaVFrame*) vf;
  }

  // Return an array of monitors locked by this frame in the youngest to oldest order
  GrowableArray<MonitorInfo*>* locked_monitors();

  // printing used during stack dumps and diagnostics
  static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state);
  void print_lock_info_on(outputStream* st, int frame_count);
  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }

#ifndef PRODUCT
 public:
  // printing operations
  void print();
  void print_value() const;
  void print_activation(int index) const;

  // verify operations
  virtual void verify() const;

  // Structural compare
  bool structural_compare(javaVFrame* other);
#endif
  friend class vframe;
};

// Java activation executed by the interpreter; JVM state is read directly
// from the interpreter frame's known layout.
class interpretedVFrame: public javaVFrame {
 public:
  // JVM state
  Method*                      method()      const;
  int                          bci()         const;
  StackValueCollection*        locals()      const;
  StackValueCollection*        expressions() const;
  GrowableArray<MonitorInfo*>* monitors()    const;

  void set_locals(StackValueCollection* values) const;

  // Test operation
  bool is_interpreted_frame() const { return true; }

 protected:
  interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {};

 public:
  // Accessors for Byte Code Pointer
  u_char* bcp() const;
  void set_bcp(u_char* bcp);

  // casting (checked downcast; NULL passes through)
  static interpretedVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
    return (interpretedVFrame*) vf;
  }

 private:
  static const int bcp_offset;
  intptr_t* locals_addr_at(int offset) const;
  // Shared implementation behind locals()/expressions().
  StackValueCollection* stack_data(bool expressions) const;
  // returns where the parameters start relative to the frame pointer
  int start_of_parameters() const;

#ifndef PRODUCT
 public:
  // verify operations
  void verify() const;
#endif
  friend class vframe;
};


// Activation of a non-Java (VM/native) method.
class externalVFrame: public vframe {
 protected:
  externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}

#ifndef PRODUCT
 public:
  // printing operations
  void print_value() const;
  void print();
#endif
  friend class vframe;
};

// External vframe for the special entry frame created when calling
// Java from C (see hierarchy comment at the top of this file).
class entryVFrame: public externalVFrame {
 public:
  bool is_entry_frame() const { return true; }

 protected:
  entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);

 public:
  // casting (checked downcast; NULL passes through)
  static entryVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
    return (entryVFrame*) vf;
  }

#ifndef PRODUCT
 public:
  // printing
  void print_value() const;
  void print();
#endif
  friend class vframe;
};


// A MonitorInfo is a ResourceObj that describes the pair:
// 1) the owner of the monitor
// 2) the monitor lock
// If the owner was scalar replaced by the optimizer, the owner oop is absent
// and _owner_klass holds the klass mirror instead (see constructor).
class MonitorInfo : public ResourceObj {
 private:
  oop        _owner;       // the object owning the monitor
  BasicLock* _lock;        // the monitor lock slot in the frame
  oop        _owner_klass; // klass (mirror) if owner was scalar replaced
  bool       _eliminated;  // true if the compiler eliminated this lock
  bool       _owner_is_scalar_replaced;
 public:
  // Constructor.
  // NOTE(review): oopDesc::bs()->write_barrier looks like a GC barrier
  // (Shenandoah-style) asserting the oop is the up-to-date to-space copy —
  // confirm against the barrier-set implementation.
  MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) {
    assert(owner == oopDesc::bs()->write_barrier(owner), "expect to-space copy");
    if (!owner_is_scalar_replaced) {
      _owner = owner;
      _owner_klass = NULL;
    } else {
      // A scalar-replaced owner implies the monitor itself was eliminated.
      assert(eliminated, "monitor should be eliminated for scalar replaced object");
      _owner = NULL;
      _owner_klass = owner;
    }
    _lock = lock;
    _eliminated = eliminated;
    _owner_is_scalar_replaced = owner_is_scalar_replaced;
  }
  // Accessors
  oop owner() const {
    assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object");
    assert(_owner == oopDesc::bs()->write_barrier(_owner), "expect to-space copy");
    return _owner;
  }
  oop owner_klass() const {
    assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object");
    return _owner_klass;
  }
  BasicLock* lock()                    const { return _lock; }
  bool       eliminated()              const { return _eliminated; }
  bool       owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; }
};

// Stack-allocated stream that walks a thread's stack one Java-level
// activation at a time, expanding inlined scopes of compiled frames.
// Caches the current method/bci so accessors are cheap.
class vframeStreamCommon : StackObj {
 protected:
  // common
  frame        _frame;   // Current raw frame.
  JavaThread*  _thread;  // Thread whose stack is being walked.
  RegisterMap  _reg_map; // Used to find sender frames.
  enum { interpreted_mode, compiled_mode, at_end_mode } _mode;

  // Decode offset of the caller scope within the current nmethod's debug
  // info, or DebugInformationRecorder::serialized_null if none.
  int _sender_decode_offset;

  // Cached information
  Method* _method;
  int     _bci;

  // Should VM activations be ignored or not
  bool _stop_at_java_call_stub;

  bool fill_in_compiled_inlined_sender();
  void fill_from_compiled_frame(int decode_offset);
  void fill_from_compiled_native_frame();

  void found_bad_method_frame();

  void fill_from_interpreter_frame();
  bool fill_from_frame();

  // Helper routine for security_get_caller_frame
  void skip_prefixed_method_and_wrappers();

 public:
  // Constructor. Does not position the stream; subclasses do that.
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }

  // Accessors
  Method*   method()   const { return _method; }
  int       bci()      const { return _bci; }
  intptr_t* frame_id() const { return _frame.id(); }
  address   frame_pc() const { return _frame.pc(); }

  CodeBlob* cb() const { return _frame.cb(); }
  nmethod*  nm() const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame()       const { return _frame.is_entry_frame(); }

  // Iteration: advance to the next Java-level activation.
  void next() {
    // handle frames with inlining: step to the inlined caller scope
    // within the same physical frame, if there is one
    if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

    // handle general case: walk raw sender frames until one yields
    // Java-level information (or the end of the stack is reached)
    do {
      _frame = _frame.sender(&_reg_map);
    } while (!fill_from_frame());
  }
  void security_next();

  bool at_end() const { return _mode == at_end_mode; }

  // Implements security traversal. Skips depth no. of frames including
  // special security frames and prefixed native methods
  void security_get_caller_frame(int depth);

  // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
  // reflection implementation
  void skip_reflection_related_frames();
};

// Concrete vframe stream positioned on construction at the thread's
// youngest Java-level activation.
class vframeStream : public vframeStreamCommon {
 public:
  // Constructors
  vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false)
    : vframeStreamCommon(thread) {
    _stop_at_java_call_stub = stop_at_java_call_stub;

    // A thread with no Java frames yields an immediately-exhausted stream.
    if (!thread->has_last_Java_frame()) {
      _mode = at_end_mode;
      return;
    }

    // Skip non-Java frames until the first Java-level activation is found.
    _frame = _thread->last_frame();
    while (!fill_from_frame()) {
      _frame = _frame.sender(&_reg_map);
    }
  }

  // top_frame may not be at safepoint, start with sender
  vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
};


// If the current compiled scope has an inlined caller scope, refill the
// cached state from it and return true; otherwise return false.
inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
    return false;
  }
  fill_from_compiled_frame(_sender_decode_offset);
  return true;
}


// Fill the cached method/bci/sender-offset by decoding the scope
// description at decode_offset in the current nmethod's debug info.
// Invalid offsets fall back to a native-frame-style fill (see below).
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or read at some other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced
    // that could lead to crashes in product mode.
    // Therefore, do not use the decode offset if invalid, but fill the frame
    // as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode. (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}

// The native frames are handled specially. We do not rely on ScopeDesc info
// since the pc might not be exact due to the _last_native_pc trick.
// Fill cached state for a native nmethod frame: the nmethod's method,
// bci 0, and no inlined sender scope.
inline void vframeStreamCommon::fill_from_compiled_native_frame() {
  _mode = compiled_mode;
  _sender_decode_offset = DebugInformationRecorder::serialized_null;
  _method = nm()->method();
  _bci = 0;
}

// Try to extract Java-level information from the current raw frame.
// Returns true if the stream now has valid state (including at_end_mode
// when the end of the stack is reached), false if this frame carries no
// Java-level information and the caller should step to the sender.
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_nmethod()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting. If however the thread is safepoint safe this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get a method a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}


// Fill cached state from an interpreter frame: read the method and bcp
// directly from the frame and convert the bcp to a (validated) bci.
inline void vframeStreamCommon::fill_from_interpreter_frame() {
  Method* method = _frame.interpreter_frame_method();
  address bcp    = _frame.interpreter_frame_bcp();
  int     bci    = method->validate_bci_from_bcp(bcp);
  // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  // AsyncGetCallTrace interrupts the VM asynchronously. As a result
  // it is possible to access an interpreter frame for which
  // no Java-level information is yet available (e.g., because
  // the frame was being created when the VM interrupted it).
  // In this scenario, pretend that the interpreter is at the point
  // of entering the method.
  if (bci < 0) {
    found_bad_method_frame();
    bci = 0;
  }
  _mode   = interpreted_mode;
  _method = method;
  _bci    = bci;
}

#endif // SHARE_VM_RUNTIME_VFRAME_HPP