168 }
169 }
170
171 _location_valid[0] = lv;
172 check_location_valid();
173 }
174
// Conservatively decide whether it is safe to walk from this frame to its
// sender on behalf of a possibly-unrelated thread (e.g. a profiler or the
// signal handler). Returns false on ANY doubt; a true result only promises
// that constructing the sender will not crash, not that the sender is valid.
// NOTE(review): this chunk elides lines 216-233 of the original file; some
// locals referenced below (sender_pc, _SENDER_SP, ...) are defined there.
175 bool frame::safe_for_sender(JavaThread *thread) {
176
// Snapshot the three stack pointers of this frame as raw addresses for
// range comparisons against the thread's stack bounds.
177 address _SP = (address) sp();
178 address _FP = (address) fp();
179 address _UNEXTENDED_SP = (address) unextended_sp();
180
181 // consider stack guards when trying to determine "safe" stack pointers
182 // sp must be within the usable part of the stack (not in guards)
183 if (!thread->is_in_usable_stack(_SP)) {
184 return false;
185 }
186
187 // unextended sp must be within the stack and above or equal sp
188 bool unextended_sp_safe = (_UNEXTENDED_SP < thread->stack_base()) &&
189 (_UNEXTENDED_SP >= _SP);
190
191 if (!unextended_sp_safe) return false;
192
193 // an fp must be within the stack and above (but not equal) sp
// NOTE(review): fp_safe is computed but not consumed in the visible part of
// this function -- presumably used in the elided range; confirm.
194 bool fp_safe = (_FP < thread->stack_base()) &&
195 (_FP > _SP);
196
197 // We know sp/unextended_sp are safe only fp is questionable here
198
199 // If the current frame is known to the code cache then we can attempt
200 // to construct the sender and do some validation of it. This goes a long way
201 // toward eliminating issues when we get in frame construction code
202
203 if (_cb != NULL ) {
204
205 // First check if frame is complete and tester is reliable
206 // Unfortunately we can only check frame complete for runtime stubs and nmethod
207 // other generic buffer blobs are more problematic so we just assume they are
208 // ok. adapter blobs never have a frame complete and are never ok.
209
// Incomplete frames of these kinds are definitely unsafe; other blob kinds
// fall through and are treated as ok (see comment above).
210 if (!_cb->is_frame_complete_at(_pc)) {
211 if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
212 return false;
213 }
214 }
215
// Continuation of frame::safe_for_sender after an elided range (original
// lines 216-233). sender_pc, _SENDER_SP, younger_sp and adjusted_stack are
// established in that elided range -- TODO confirm against the full file.
234 // We must always be able to find a recognizable pc
235 CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
236 if (sender_pc == NULL || sender_blob == NULL) {
237 return false;
238 }
239
240 // Could be a zombie method
241 if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
242 return false;
243 }
244
245 // It should be safe to construct the sender though it might not be valid
246
247 frame sender(_SENDER_SP, younger_sp, adjusted_stack);
248
249 // Do we have a valid fp?
250 address sender_fp = (address) sender.fp();
251
252 // an fp must be within the stack and above (but not equal) current frame's _FP
253
254 bool sender_fp_safe = (sender_fp < thread->stack_base()) &&
255 (sender_fp > _FP);
256
257 if (!sender_fp_safe) {
258 return false;
259 }
260
261
262 // If the potential sender is the interpreter then we can do some more checking
263 if (Interpreter::contains(sender_pc)) {
264 return sender.is_interpreted_frame_valid(thread);
265 }
266
267 // Could just be some random pointer within the codeBlob
268 if (!sender.cb()->code_contains(sender_pc)) {
269 return false;
270 }
271
272 // We should never be able to see an adapter if the current frame is something from code cache
273 if (sender_blob->is_adapter_blob()) {
274 return false;
275 }
276
277 if (sender.is_entry_frame()) {
278 // Validate the JavaCallWrapper an entry frame must have
279
// The wrapper lives on the stack just above the entry frame's fp; a value
// outside (stack_base, sender_fp) means the frame is corrupt.
280 address jcw = (address)sender.entry_frame_call_wrapper();
281
282 bool jcw_safe = (jcw < thread->stack_base()) && (jcw > sender_fp);
283
284 return jcw_safe;
285 }
286
287 // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
288 // because you must allocate window space
289
290 if (sender_blob->frame_size() <= 0) {
291 assert(!sender_blob->is_compiled(), "should count return address at least");
292 return false;
293 }
294
295 // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
296 // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier
297 // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the
298 // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
299 // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
300 // that initial frame and retrying.
301
302 if (!sender_blob->is_compiled()) {
303 return false;
304 }
// Tail of frame::is_interpreted_frame_valid (head is outside this chunk).
// 'm' is presumably the frame's Method* validated earlier -- TODO confirm.
// The 1024-word slack above max_stack bounds the plausible frame size.
653 // stack frames shouldn't be much larger than max_stack elements
654
655 if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
656 return false;
657 }
658
659 // validate bci/bcp
660
661 address bcp = interpreter_frame_bcp();
662 if (m->validate_bci_from_bcp(bcp) < 0) {
663 return false;
664 }
665
666 // validate ConstantPoolCache*
667 ConstantPoolCache* cp = *interpreter_frame_cache_addr();
668 if (MetaspaceObj::is_valid(cp) == false) return false;
669
670 // validate locals
671
// Locals must lie between this frame's fp and the stack base (they belong
// to the caller's part of the stack).
672 address locals = (address) *interpreter_frame_locals_addr();
673
674 if (locals >= thread->stack_base() || locals < (address) fp()) return false;
675
676 // We'd have to be pretty unlucky to be misled at this point
677 return true;
678 }
679
680
681 // Windows have been flushed on entry (but not marked). Capture the pc that
682 // is the return address to the frame that contains "sp" as its stack pointer.
683 // This pc resides in the callee of the frame corresponding to "sp".
684 // As a side effect we mark this JavaFrameAnchor as having flushed the windows.
685 // This side effect lets us mark stacked JavaFrameAnchors (stacked in the
686 // call_helper) as flushed when we have flushed the windows for the most
687 // recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
688 // and lets us find the pc just once rather than multiple times as it did
689 // in the bad old _post_Java_state days.
690 //
691 void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
692 if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
693 // try and find the sp just younger than _last_Java_sp
694 intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
695 // Really this should never fail otherwise VM call must have non-standard
696 // frame linkage (bad) or stack is not properly flushed (worse).
697 guarantee(_post_Java_sp != NULL, "bad stack!");
|
168 }
169 }
170
171 _location_valid[0] = lv;
172 check_location_valid();
173 }
174
// Conservatively decide whether it is safe to walk from this frame to its
// sender on behalf of a possibly-unrelated thread (e.g. a profiler or the
// signal handler). Returns false on ANY doubt; a true result only promises
// that constructing the sender will not crash, not that the sender is valid.
// NOTE(review): this chunk elides lines 214-231 of the original file; some
// locals referenced below (sender_pc, _SENDER_SP, ...) are defined there.
175 bool frame::safe_for_sender(JavaThread *thread) {
176
// Snapshot the three stack pointers of this frame as raw addresses for
// range checks against the thread's stack bounds.
177 address _SP = (address) sp();
178 address _FP = (address) fp();
179 address _UNEXTENDED_SP = (address) unextended_sp();
180
181 // consider stack guards when trying to determine "safe" stack pointers
182 // sp must be within the usable part of the stack (not in guards)
183 if (!thread->is_in_usable_stack(_SP)) {
184 return false;
185 }
186
187 // unextended sp must be within the stack and above or equal sp
188 if (!thread->is_in_stack_range_incl(_UNEXTENDED_SP, _SP)) {
189 return false;
190 }
191
192 // an fp must be within the stack and above (but not equal) sp
// NOTE(review): fp_safe is computed but not consumed in the visible part of
// this function -- presumably used in the elided range; confirm.
193 bool fp_safe = thread->is_in_stack_range_excl(_FP, _SP);
194
195 // We know sp/unextended_sp are safe only fp is questionable here
196
197 // If the current frame is known to the code cache then we can attempt
198 // to construct the sender and do some validation of it. This goes a long way
199 // toward eliminating issues when we get in frame construction code
200
201 if (_cb != NULL ) {
202
203 // First check if frame is complete and tester is reliable
204 // Unfortunately we can only check frame complete for runtime stubs and nmethod
205 // other generic buffer blobs are more problematic so we just assume they are
206 // ok. adapter blobs never have a frame complete and are never ok.
207
// Incomplete frames of these kinds are definitely unsafe; other blob kinds
// fall through and are treated as ok (see comment above).
208 if (!_cb->is_frame_complete_at(_pc)) {
209 if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
210 return false;
211 }
212 }
213
// Continuation of frame::safe_for_sender after an elided range (original
// lines 214-231). sender_pc, _SENDER_SP, younger_sp and adjusted_stack are
// established in that elided range -- TODO confirm against the full file.
232 // We must always be able to find a recognizable pc
233 CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
234 if (sender_pc == NULL || sender_blob == NULL) {
235 return false;
236 }
237
238 // Could be a zombie method
239 if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
240 return false;
241 }
242
243 // It should be safe to construct the sender though it might not be valid
244
245 frame sender(_SENDER_SP, younger_sp, adjusted_stack);
246
247 // Do we have a valid fp?
248 address sender_fp = (address) sender.fp();
249
250 // an fp must be within the stack and above (but not equal) current frame's _FP
251
252 if (!thread->is_in_stack_range_excl(sender_fp, _FP)) {
253 return false;
254 }
255
256
257 // If the potential sender is the interpreter then we can do some more checking
258 if (Interpreter::contains(sender_pc)) {
259 return sender.is_interpreted_frame_valid(thread);
260 }
261
262 // Could just be some random pointer within the codeBlob
263 if (!sender.cb()->code_contains(sender_pc)) {
264 return false;
265 }
266
267 // We should never be able to see an adapter if the current frame is something from code cache
268 if (sender_blob->is_adapter_blob()) {
269 return false;
270 }
271
272 if (sender.is_entry_frame()) {
273 // Validate the JavaCallWrapper an entry frame must have
// The wrapper must lie strictly between the sender's fp and the stack base.
274 address jcw = (address)sender.entry_frame_call_wrapper();
275
276 return thread->is_in_stack_range_excl(jcw, sender_fp);
277 }
278
279 // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
280 // because you must allocate window space
281
282 if (sender_blob->frame_size() <= 0) {
283 assert(!sender_blob->is_compiled(), "should count return address at least");
284 return false;
285 }
286
287 // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
288 // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier
289 // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the
290 // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
291 // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
292 // that initial frame and retrying.
293
294 if (!sender_blob->is_compiled()) {
295 return false;
296 }
// Tail of frame::is_interpreted_frame_valid (head is outside this chunk).
// 'm' is presumably the frame's Method* validated earlier -- TODO confirm.
// The 1024-word slack above max_stack bounds the plausible frame size.
645 // stack frames shouldn't be much larger than max_stack elements
646
647 if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
648 return false;
649 }
650
651 // validate bci/bcp
652
653 address bcp = interpreter_frame_bcp();
654 if (m->validate_bci_from_bcp(bcp) < 0) {
655 return false;
656 }
657
658 // validate ConstantPoolCache*
659 ConstantPoolCache* cp = *interpreter_frame_cache_addr();
660 if (MetaspaceObj::is_valid(cp) == false) return false;
661
662 // validate locals
663
// Locals must lie between this frame's fp (inclusive) and the stack base;
// this is the final check, so its result is the function's verdict.
664 address locals = (address) *interpreter_frame_locals_addr();
665 return thread->is_in_stack_range_incl(locals, (address)fp());
666 }
667
668
669 // Windows have been flushed on entry (but not marked). Capture the pc that
670 // is the return address to the frame that contains "sp" as its stack pointer.
671 // This pc resides in the callee of the frame corresponding to "sp".
672 // As a side effect we mark this JavaFrameAnchor as having flushed the windows.
673 // This side effect lets us mark stacked JavaFrameAnchors (stacked in the
674 // call_helper) as flushed when we have flushed the windows for the most
675 // recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
676 // and lets us find the pc just once rather than multiple times as it did
677 // in the bad old _post_Java_state days.
678 //
679 void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
680 if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
681 // try and find the sp just younger than _last_Java_sp
682 intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
683 // Really this should never fail otherwise VM call must have non-standard
684 // frame linkage (bad) or stack is not properly flushed (worse).
685 guarantee(_post_Java_sp != NULL, "bad stack!");
|