1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)frame_x86.cpp 1.219 07/09/17 09:36:42 JVM"
3 #endif
4 /*
5 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
26 */
27
28 # include "incls/_precompiled.incl"
29 # include "incls/_frame_x86.cpp.incl"
30
#ifdef ASSERT
// Debug-only hook for validating register-save locations in a RegisterMap.
// On x86 there is nothing extra to verify, so the body is intentionally empty.
void RegisterMap::check_location_valid() {
}
#endif
35
36
37 // Profiling/safepoint support
38
// Conservatively decide whether this frame can be used as a starting point
// for a stack walk on behalf of 'thread' (profiling/safepoint code may look
// at a thread stopped at an arbitrary pc). A 'false' answer is always safe;
// 'true' means the raw frame pointers look plausible for this thread's stack.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;
  // Each pointer must be non-NULL and lie within the thread's stack,
  // i.e. within [stack_base - stack_size, stack_base].
  bool sp_safe = (sp != NULL &&
                 (sp <= thread->stack_base()) &&
                 (sp >= thread->stack_base() - thread->stack_size()));
  bool unextended_sp_safe = (unextended_sp != NULL &&
                 (unextended_sp <= thread->stack_base()) &&
                 (unextended_sp >= thread->stack_base() - thread->stack_size()));
  bool fp_safe = (fp != NULL &&
                 (fp <= thread->stack_base()) &&
                 (fp >= thread->stack_base() - thread->stack_size()));
  if (sp_safe && unextended_sp_safe && fp_safe) {
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.
    if (_cb != NULL && !_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }
    return true;
  }
  // Note: fp == NULL is not really a prerequisite for this to be safe to
  // walk for c2. However we've modified the code such that if we get
  // a failure with fp != NULL that we then try with FP == NULL.
  // This is basically to mimic what a last_frame would look like if
  // c2 had generated it.
  if (sp_safe && unextended_sp_safe && fp == NULL) {
    // frame must be complete if fp == NULL as fp == NULL is only sensible
    // if we are looking at a nmethod and frame complete assures us of that.
    if (_cb != NULL && _cb->is_frame_complete_at(_pc) && _cb->is_compiled_by_c2()) {
      return true;
    }
  }
  return false;
}
77
78
79 void frame::patch_pc(Thread* thread, address pc) {
80 if (TracePcPatching) {
81 tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)sp())[-1], ((address *)sp())[-1], pc);
82 }
83 ((address *)sp())[-1] = pc;
84 _cb = CodeCache::find_blob(pc);
85 if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
86 address orig = (((nmethod*)_cb)->get_original_pc(this));
87 assert(orig == _pc, "expected original to be stored before patching");
88 _deopt_state = is_deoptimized;
89 // leave _pc as is
90 } else {
91 _deopt_state = not_deoptimized;
92 _pc = pc;
93 }
94 }
95
96 bool frame::is_interpreted_frame() const {
97 return Interpreter::contains(pc());
98 }
99
100 int frame::frame_size() const {
101 RegisterMap map(JavaThread::current(), false);
278 // Must be native-compiled frame, i.e. the marshaling code for native
279 // methods that exists in the core system.
280 return frame(sender_sp(), link(), sender_pc());
281 }
282
283
284 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
285 assert(is_interpreted_frame(), "must be interpreter frame");
286 methodOop method = interpreter_frame_method();
287 // When unpacking an optimized frame the frame pointer is
288 // adjusted with:
289 int diff = (method->max_locals() - method->size_of_parameters()) *
290 Interpreter::stackElementWords();
291 return _fp == (fp - diff);
292 }
293
// Platform-dependent GC epilog hook; no per-frame work is needed on x86.
void frame::pd_gc_epilog() {
  // nothing done here now
}
297
// Heuristic sanity check that this frame plausibly is a well-formed
// interpreter frame. Used when examining stacks that may be in an
// inconsistent state; 'true' is a strong hint, not a guarantee.
bool frame::is_interpreted_frame_valid() const {
// QQQ
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  // fp and sp must be non-zero and word-aligned.
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  // The interpreter's initial expression-stack slot must not lie below sp.
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }
  if (fp() - sp() > 4096) {  // stack frames shouldn't be large.
    return false;
  }
#endif // CC_INTERP
  return true;
}
324
325 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
326 #ifdef CC_INTERP
327 // Needed for JVMTI. The result should always be in the interpreterState object
328 assert(false, "NYI");
329 interpreterState istate = get_interpreterState();
330 #endif // CC_INTERP
331 assert(is_interpreted_frame(), "interpreted frame expected");
332 methodOop method = interpreter_frame_method();
333 BasicType type = method->result_type();
334
335 intptr_t* tos_addr;
336 if (method->is_native()) {
337 // Prior to calling into the runtime to report the method_exit the possible
338 // return value is pushed to the native stack. If the result is a jfloat/jdouble
339 // then ST0 is saved before EAX/EDX. See the note in generate_native_result
340 tos_addr = (intptr_t*)sp();
|
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 # include "incls/_precompiled.incl"
26 # include "incls/_frame_x86.cpp.incl"
27
#ifdef ASSERT
// Debug-only hook for validating register-save locations in a RegisterMap.
// On x86 there is nothing extra to verify, so the body is intentionally empty.
void RegisterMap::check_location_valid() {
}
#endif
32
33
34 // Profiling/safepoint support
35
// Conservatively decide whether this frame can be used as a starting point
// for a stack walk on behalf of 'thread' (profiling/safepoint code may look
// at a thread stopped at an arbitrary pc). When the frame is known to the
// code cache, a candidate sender is constructed and validated as well.
// A 'false' answer is always safe; the checks deliberately err that way.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;
  // sp must be within the stack
  bool sp_safe = (sp <= thread->stack_base()) &&
                 (sp >= thread->stack_base() - thread->stack_size());

  if (!sp_safe) {
    return false;
  }

  // unextended sp must be within the stack and above or equal sp
  bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
                            (unextended_sp >= sp);

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }
    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.

      if (!fp_safe) return false;

      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)entry_frame_call_wrapper();

      // The wrapper lives above fp on the stack, below stack_base.
      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);

      return jcw_safe;

    }

    intptr_t* sender_sp = NULL;
    address   sender_pc = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      // Interpreter frames keep the return pc at a fixed offset from fp.
      sender_pc = (address) this->fp()[return_addr_offset];
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      sender_sp = _unextended_sp + _cb->frame_size();
      // On Intel the return_address is always the word on the stack
      sender_pc = (address) *(sender_sp-1);
    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL || sender_blob == NULL) {
      return false;
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // ebp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
      // is really a frame pointer.

      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // Could just be some random pointer within the codeBlob

    if (!sender_blob->instructions_contains(sender_pc)) return false;

    // We should never be able to see an adapter if the current frame is something from code cache

    if ( sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub

    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());

      return jcw_safe;
    }

    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.

    if (sender_blob->frame_size() == 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    assert(sender_blob->is_nmethod(), "Impossible call chain");

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
216
217
// Overwrite this frame's return-address slot (the word just below sp()) with
// 'pc' and resynchronize the cached code blob and deoptimization state.
// If the new pc lands in an nmethod and the current _pc is a deopt pc, the
// frame is marked deoptimized and _pc is left at the stashed original pc;
// otherwise _pc is updated to the new pc.
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    // INTPTR_FORMAT prints pointer-sized values correctly on both ILP32 and LP64.
    tty->print_cr("patch_pc at address" INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ",
                  &((address *)sp())[-1], ((address *)sp())[-1], pc);
  }
  ((address *)sp())[-1] = pc;
  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
235
236 bool frame::is_interpreted_frame() const {
237 return Interpreter::contains(pc());
238 }
239
240 int frame::frame_size() const {
241 RegisterMap map(JavaThread::current(), false);
418 // Must be native-compiled frame, i.e. the marshaling code for native
419 // methods that exists in the core system.
420 return frame(sender_sp(), link(), sender_pc());
421 }
422
423
424 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
425 assert(is_interpreted_frame(), "must be interpreter frame");
426 methodOop method = interpreter_frame_method();
427 // When unpacking an optimized frame the frame pointer is
428 // adjusted with:
429 int diff = (method->max_locals() - method->size_of_parameters()) *
430 Interpreter::stackElementWords();
431 return _fp == (fp - diff);
432 }
433
// Platform-dependent GC epilog hook; no per-frame work is needed on x86.
void frame::pd_gc_epilog() {
  // nothing done here now
}
437
// Heuristic sanity check that this frame plausibly is a well-formed
// interpreter frame owned by 'thread'. Beyond pointer alignment/ordering,
// it validates the frame's method oop, bci, constant-pool cache, and locals
// pointer. 'true' is a strong hint, not a guarantee.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// QQQ
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  // fp and sp must be non-zero and word-aligned.
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  // The interpreter's initial expression-stack slot must not lie below sp.
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  methodOop m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Universe::heap()->is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements

  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
    return false;
  }

  // validate bci/bcx

  intptr_t bcx = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate constantPoolCacheOop

  constantPoolCacheOop cp = *interpreter_frame_cache_addr();

  // The cp cache must be a non-NULL, aligned pointer into the permanent heap.
  if (cp == NULL ||
      !Space::is_aligned(cp) ||
      !Universe::heap()->is_permanent((void*)cp)) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();

  // Locals live between fp and the thread's stack base.
  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point

#endif // CC_INTERP
  return true;
}
500
501 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
502 #ifdef CC_INTERP
503 // Needed for JVMTI. The result should always be in the interpreterState object
504 assert(false, "NYI");
505 interpreterState istate = get_interpreterState();
506 #endif // CC_INTERP
507 assert(is_interpreted_frame(), "interpreted frame expected");
508 methodOop method = interpreter_frame_method();
509 BasicType type = method->result_type();
510
511 intptr_t* tos_addr;
512 if (method->is_native()) {
513 // Prior to calling into the runtime to report the method_exit the possible
514 // return value is pushed to the native stack. If the result is a jfloat/jdouble
515 // then ST0 is saved before EAX/EDX. See the note in generate_native_result
516 tos_addr = (intptr_t*)sp();
|