/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VFRAME_INLINE_HPP
#define SHARE_VM_RUNTIME_VFRAME_INLINE_HPP

#include "runtime/frame.inline.hpp"
#include "runtime/vframe.hpp"

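// Base constructor: record the thread and set up its register map. Positioning
// the stream on the first frame is done by the subclass constructor (see
// vframeStream below).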
inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
  _thread = thread;
}

inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }

inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }

inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }

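// Advance to the next Java-level frame: first step to an inlined caller scope
// within the current compiled frame, if any; otherwise walk physical sender
// frames until one fills the stream.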
inline void vframeStreamCommon::next() {
  // handle frames with inlining
  if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

  // handle general case
  do {
    _frame = _frame.sender(&_reg_map);
  } while (!fill_from_frame());
}

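// Position the stream on the thread's topmost Java frame, or at the end if the
// thread has no last Java frame. With stop_at_java_call_stub set, iteration
// later stops at entry frames (Java call stubs).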
inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub)
  : vframeStreamCommon(thread) {
  _stop_at_java_call_stub = stop_at_java_call_stub;

  if (!thread->has_last_Java_frame()) {
    _mode = at_end_mode;
    return;
  }

  _frame = _thread->last_frame();
  while (!fill_from_frame()) {
    _frame = _frame.sender(&_reg_map);
  }
}

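// If the current scope has an inlined caller (a non-null sender decode
// offset), fill the stream from that scope and return true; otherwise the
// scopes of this physical frame are exhausted.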
inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
    return false;
  }
  fill_from_compiled_frame(_sender_decode_offset);
  return true;
}


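// Fill the stream from the scope at decode_offset in the nmethod's debug info,
// setting _method, _bci and the decode offset of the inlined sender scope.
// Invalid offsets fall back to fill_from_compiled_native_frame().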
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0),
    // or at some other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced,
    // which could lead to crashes in product mode.
    // Therefore, do not use the decode offset if it is invalid, but fill the
    // frame as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      ttyLocker ttyl;
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
    found_bad_method_frame();
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}

// The native frames are handled specially. We do not rely on ScopeDesc info
// since the pc might not be exact due to the _last_native_pc trick.
inline void vframeStreamCommon::fill_from_compiled_native_frame() {
  _mode = compiled_mode;
  _sender_decode_offset = DebugInformationRecorder::serialized_null;
  _method = nm()->method();
  _bci = 0;
}

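// Try to fill the stream from the current physical frame. Returns true if the
// frame produced a vframe (or marked the end of the stack), false if the
// caller should step to the sender frame and retry.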
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_compiled()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (as AsyncGetCallTrace would do), then this is an
        // acceptable result. [ This assumes that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe,
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If, however, the thread is safepoint safe, this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety.
        // If the state were, say, in_Java_trans, then we'd expect that
        // the pc would already have been slightly adjusted to
        // one that would produce a pcDesc, since the trans state
        // is one that might in fact anticipate a safepoint.

        if (state == _thread_in_Java) {
          // This will give us a method with a zero bci and no inlining.
          // It might be nice to have a unique bci to signify this
          // particular case, but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // we could produce a bad stack chain. However, until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures,
          // we don't do it, as while we may get a bad call chain the
          // probability is much higher (by several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}


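// Fill the stream from an interpreter frame: fetch the method and bcp and
// convert the bcp to a bci, guarding against partially constructed frames
// (see the 6379830 note in the body).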
inline void vframeStreamCommon::fill_from_interpreter_frame() {
  Method* method = _frame.interpreter_frame_method();
  address   bcp    = _frame.interpreter_frame_bcp();
  int       bci    = method->validate_bci_from_bcp(bcp);
  // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  // AsyncGetCallTrace interrupts the VM asynchronously. As a result
  // it is possible to access an interpreter frame for which
  // no Java-level information is yet available (e.g., because
  // the frame was being created when the VM interrupted it).
  // In this scenario, pretend that the interpreter is at the point
  // of entering the method.
  if (bci < 0) {
    DEBUG_ONLY(found_bad_method_frame();)
    bci = 0;
  }
  _mode   = interpreted_mode;
  _method = method;
  _bci    = bci;
}

#endif // SHARE_VM_RUNTIME_VFRAME_INLINE_HPP