
src/hotspot/share/runtime/vframe.inline.hpp

rev 53032 : imported patch 8215205
rev 53033 : imported patch at_scope


  27 
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/vframe.hpp"
  30 
  31 inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
  32   _thread = thread;
  33 }
  34 
  35 inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }
  36 
  37 inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  38 
  39 inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }
  40 
  41 inline void vframeStreamCommon::next() {
  42   // handle frames with inlining
  43   if (_mode == compiled_mode    && fill_in_compiled_inlined_sender()) return;
  44 
  45   // handle general case
  46   do {

  47     _frame = _frame.sender(&_reg_map);
  48   } while (!fill_from_frame());
  49 }
  50 
  51 inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub)
  52   : vframeStreamCommon(thread) {
  53   _stop_at_java_call_stub = stop_at_java_call_stub;
  54 
  55   if (!thread->has_last_Java_frame()) {
  56     _mode = at_end_mode;
  57     return;
  58   }
  59 
  60   _frame = _thread->last_frame();
  61   while (!fill_from_frame()) {

  62     _frame = _frame.sender(&_reg_map);
  63   }
  64 }
  65 
  66 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  67   if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
  68     return false;
  69   }
  70   fill_from_compiled_frame(_sender_decode_offset);

  71   return true;
  72 }
  73 
  74 
  75 inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  76   _mode = compiled_mode;

  77 
  78   // Range check to detect ridiculous offsets.
  79   if (decode_offset == DebugInformationRecorder::serialized_null ||
  80       decode_offset < 0 ||
  81       decode_offset >= nm()->scopes_data_size()) {
  82     // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  83     // If we read nmethod::scopes_data at serialized_null (== 0)
  84     // or if we read at some other invalid offset, invalid values will be decoded.
  85     // Based on these values, invalid heap locations could be referenced
  86     // that could lead to crashes in product mode.
  87     // Therefore, do not use the decode offset if it is invalid, but fill the frame
  88     // as if it were a native compiled frame (no Java-level assumptions).
  89 #ifdef ASSERT
  90     if (WizardMode) {
  91       ttyLocker ttyl;
  92       tty->print_cr("Error in fill_from_frame: pc_desc for "
  93                     INTPTR_FORMAT " not found or invalid at %d",
  94                     p2i(_frame.pc()), decode_offset);
  95       nm()->print();
  96       nm()->method()->print_codes();


 101 #endif
 102     // Provide a cheap fallback in product mode.  (See comment above.)
 103     fill_from_compiled_native_frame();
 104     return;
 105   }
 106 
 107   // Decode first part of scopeDesc
 108   DebugInfoReadStream buffer(nm(), decode_offset);
 109   _sender_decode_offset = buffer.read_int();
 110   _method               = buffer.read_method();
 111   _bci                  = buffer.read_bci();
 112 
 113   assert(_method->is_method(), "checking type of decoded method");
 114 }
 115 
 116 // The native frames are handled specially. We do not rely on ScopeDesc info
 117 // since the pc might not be exact due to the _last_native_pc trick.
 118 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
 119   _mode = compiled_mode;
 120   _sender_decode_offset = DebugInformationRecorder::serialized_null;


 121   _method = nm()->method();
 122   _bci = 0;
 123 }
 124 
 125 inline bool vframeStreamCommon::fill_from_frame() {
 126   // Interpreted frame
 127   if (_frame.is_interpreted_frame()) {
 128     fill_from_interpreter_frame();
 129     return true;
 130   }
 131 
 132   // Compiled frame
 133 
 134   if (cb() != NULL && cb()->is_compiled()) {
 135     if (nm()->is_native_method()) {
 136       // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
 137       fill_from_compiled_native_frame();
 138     } else {
 139       PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
 140       int decode_offset;


 170 
 171           fill_from_compiled_native_frame();
 172 
 173           // There is something to be said for setting the mode to
 174           // at_end_mode to prevent trying to walk further up the
 175           // stack. There is evidence that if we walk any further,
 176           // we could produce a bad stack chain. However, until we
 177           // see evidence that allowing this produces frames bad
 178           // enough to cause segvs or assertion failures, we don't
 179           // do it: while we may get a bad call chain, the
 180           // probability is much higher (by several orders of
 181           // magnitude) that we get good data.
 182 
 183           return true;
 184         }
 185         decode_offset = DebugInformationRecorder::serialized_null;
 186       } else {
 187         decode_offset = pc_desc->scope_decode_offset();
 188       }
 189       fill_from_compiled_frame(decode_offset);

 190     }
 191     return true;
 192   }
 193 
 194   // End of stack?
 195   if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
 196     _mode = at_end_mode;
 197     return true;
 198   }
 199 
 200   return false;
 201 }
 202 
 203 
 204 inline void vframeStreamCommon::fill_from_interpreter_frame() {
 205   Method* method = _frame.interpreter_frame_method();
 206   address   bcp    = _frame.interpreter_frame_bcp();
 207   int       bci    = method->validate_bci_from_bcp(bcp);
 208   // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
 209   // AsyncGetCallTrace interrupts the VM asynchronously. As a result


  27 
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/vframe.hpp"
  30 
  31 inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
  32   _thread = thread;
  33 }
  34 
  35 inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }
  36 
  37 inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  38 
  39 inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }
  40 
  41 inline void vframeStreamCommon::next() {
  42   // handle frames with inlining
  43   if (_mode == compiled_mode    && fill_in_compiled_inlined_sender()) return;
  44 
  45   // handle general case
  46   do {
  47     _prev_frame = _frame;
  48     _frame = _frame.sender(&_reg_map);
  49   } while (!fill_from_frame());
  50 }
  51 
  52 inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub)
  53   : vframeStreamCommon(thread) {
  54   _stop_at_java_call_stub = stop_at_java_call_stub;
  55 
  56   if (!thread->has_last_Java_frame()) {
  57     _mode = at_end_mode;
  58     return;
  59   }
  60 
  61   _frame = _thread->last_frame();
  62   while (!fill_from_frame()) {
  63     _prev_frame = _frame;
  64     _frame = _frame.sender(&_reg_map);
  65   }
  66 }
  67 
  68 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  69   if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
  70     return false;
  71   }
  72   fill_from_compiled_frame(_sender_decode_offset);
  73   ++_vframe_id;
  74   return true;
  75 }
  76 
  77 
  78 inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  79   _mode = compiled_mode;
  80   _decode_offset = decode_offset;
  81 
  82   // Range check to detect ridiculous offsets.
  83   if (decode_offset == DebugInformationRecorder::serialized_null ||
  84       decode_offset < 0 ||
  85       decode_offset >= nm()->scopes_data_size()) {
  86     // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  87     // If we read nmethod::scopes_data at serialized_null (== 0)
  88     // or if we read at some other invalid offset, invalid values will be decoded.
  89     // Based on these values, invalid heap locations could be referenced
  90     // that could lead to crashes in product mode.
  91     // Therefore, do not use the decode offset if it is invalid, but fill the frame
  92     // as if it were a native compiled frame (no Java-level assumptions).
  93 #ifdef ASSERT
  94     if (WizardMode) {
  95       ttyLocker ttyl;
  96       tty->print_cr("Error in fill_from_frame: pc_desc for "
  97                     INTPTR_FORMAT " not found or invalid at %d",
  98                     p2i(_frame.pc()), decode_offset);
  99       nm()->print();
 100       nm()->method()->print_codes();


 105 #endif
 106     // Provide a cheap fallback in product mode.  (See comment above.)
 107     fill_from_compiled_native_frame();
 108     return;
 109   }
 110 
 111   // Decode first part of scopeDesc
 112   DebugInfoReadStream buffer(nm(), decode_offset);
 113   _sender_decode_offset = buffer.read_int();
 114   _method               = buffer.read_method();
 115   _bci                  = buffer.read_bci();
 116 
 117   assert(_method->is_method(), "checking type of decoded method");
 118 }
 119 
 120 // The native frames are handled specially. We do not rely on ScopeDesc info
 121 // since the pc might not be exact due to the _last_native_pc trick.
 122 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
 123   _mode = compiled_mode;
 124   _sender_decode_offset = DebugInformationRecorder::serialized_null;
 125   _decode_offset = DebugInformationRecorder::serialized_null;
 126   _vframe_id = 0;
 127   _method = nm()->method();
 128   _bci = 0;
 129 }
 130 
 131 inline bool vframeStreamCommon::fill_from_frame() {
 132   // Interpreted frame
 133   if (_frame.is_interpreted_frame()) {
 134     fill_from_interpreter_frame();
 135     return true;
 136   }
 137 
 138   // Compiled frame
 139 
 140   if (cb() != NULL && cb()->is_compiled()) {
 141     if (nm()->is_native_method()) {
 142       // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
 143       fill_from_compiled_native_frame();
 144     } else {
 145       PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
 146       int decode_offset;


 176 
 177           fill_from_compiled_native_frame();
 178 
 179           // There is something to be said for setting the mode to
 180           // at_end_mode to prevent trying to walk further up the
 181           // stack. There is evidence that if we walk any further,
 182           // we could produce a bad stack chain. However, until we
 183           // see evidence that allowing this produces frames bad
 184           // enough to cause segvs or assertion failures, we don't
 185           // do it: while we may get a bad call chain, the
 186           // probability is much higher (by several orders of
 187           // magnitude) that we get good data.
 188 
 189           return true;
 190         }
 191         decode_offset = DebugInformationRecorder::serialized_null;
 192       } else {
 193         decode_offset = pc_desc->scope_decode_offset();
 194       }
 195       fill_from_compiled_frame(decode_offset);
 196       _vframe_id = 0;
 197     }
 198     return true;
 199   }
 200 
 201   // End of stack?
 202   if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
 203     _mode = at_end_mode;
 204     return true;
 205   }
 206 
 207   return false;
 208 }
 209 
 210 
 211 inline void vframeStreamCommon::fill_from_interpreter_frame() {
 212   Method* method = _frame.interpreter_frame_method();
 213   address   bcp    = _frame.interpreter_frame_bcp();
 214   int       bci    = method->validate_bci_from_bcp(bcp);
 215   // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
 216   // AsyncGetCallTrace interrupts the VM asynchronously. As a result
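
A minimal usage sketch (not part of the webrev) showing how a vframeStream built by the
constructor above is typically walked. It assumes the at_end(), method() and bci()
accessors that vframeStreamCommon declares in runtime/vframe.hpp and that
stop_at_java_call_stub defaults to false; the helper name walk_java_frames is
hypothetical.

  #include "runtime/vframe.inline.hpp"

  // Visit each Java-level virtual frame of 'thread', one inlining level per
  // iteration: next() first consumes the inlined scopes of a compiled frame via
  // fill_in_compiled_inlined_sender(), then advances to the physical sender.
  static void walk_java_frames(JavaThread* thread) {
    for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
      Method* m   = vfst.method();  // method executing in this virtual frame
      int     bci = vfst.bci();     // bytecode index within that method
      // ... inspect m and bci ...
    }
  }

As with other RegisterMap-based stack walking, this pattern assumes the target thread's
frames are stable while walking, e.g. the thread is the current thread or is stopped at
a safepoint or handshake.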