--- old/src/share/vm/code/nmethod.hpp
+++ new/src/share/vm/code/nmethod.hpp
1 1 /*
2 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
  25  25 // This class is used internally by nmethods to cache
26 26 // exception/pc/handler information.
27 27
28 28 class ExceptionCache : public CHeapObj {
29 29 friend class VMStructs;
30 30 private:
31 31 static address _unwind_handler;
32 32 enum { cache_size = 16 };
33 33 klassOop _exception_type;
34 34 address _pc[cache_size];
35 35 address _handler[cache_size];
36 36 int _count;
37 37 ExceptionCache* _next;
38 38
39 39 address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
40 40 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
41 41 address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
42 42 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
43 43 int count() { return _count; }
44 44 void increment_count() { _count++; }
45 45
46 46 public:
47 47
48 48 ExceptionCache(Handle exception, address pc, address handler);
49 49
50 50 klassOop exception_type() { return _exception_type; }
51 51 klassOop* exception_type_addr() { return &_exception_type; }
52 52 ExceptionCache* next() { return _next; }
53 53 void set_next(ExceptionCache *ec) { _next = ec; }
54 54
55 55 address match(Handle exception, address pc);
  56  56   bool    match_exception_with_space(Handle exception);
57 57 address test_address(address addr);
  58  58   bool    add_address_and_handler(address addr, address handler);
59 59
60 60 static address unwind_handler() { return _unwind_handler; }
61 61 };
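
For orientation, here is a minimal standalone sketch of the probe this cache supports: one node per exception type, with parallel pc/handler arrays scanned linearly. All names below are illustrative stand-ins, not declarations from this header.

    // Illustrative stand-in for ExceptionCache::test_address(): scan the
    // filled slots of the parallel pc/handler arrays for a matching pc.
    #include <cstddef>

    struct ExceptionCacheSketch {
      static const int cache_size = 16;   // mirrors the enum above
      const void* pc[cache_size];
      const void* handler[cache_size];
      int count;                          // number of filled slots

      const void* test_address(const void* addr) const {
        for (int i = 0; i < count; i++) {
          if (pc[i] == addr) return handler[i];
        }
        return NULL;
      }
    };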
62 62
63 63
64 64 // cache pc descs found in earlier inquiries
65 65 class PcDescCache VALUE_OBJ_CLASS_SPEC {
66 66 friend class VMStructs;
67 67 private:
68 68 enum { cache_size = 4 };
69 69 PcDesc* _last_pc_desc; // most recent pc_desc found
70 70 PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
71 71 public:
72 72 PcDescCache() { debug_only(_last_pc_desc = NULL); }
73 73 void reset_to(PcDesc* initial_pc_desc);
74 74 PcDesc* find_pc_desc(int pc_offset, bool approximate);
75 75 void add_pc_desc(PcDesc* pc_desc);
76 76 PcDesc* last_pc_desc() { return _last_pc_desc; }
77 77 };
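
The inline nmethod::find_pc_desc later in this file shows the intended usage pattern: check the most recent hit first, then fall back to the full search. A hypothetical standalone analogue of that two-level lookup:

    // Hypothetical analogue of the fast path in nmethod::find_pc_desc below:
    // reuse the last result when its key matches, otherwise run the full
    // search and cache the answer for next time.
    #include <cstddef>

    struct PcEntry { int pc_offset; };

    struct LastHitCache {
      PcEntry* _last;                     // most recent hit, may be NULL
      LastHitCache() : _last(NULL) {}

      PcEntry* find(int pc_offset, PcEntry* (*full_search)(int)) {
        if (_last != NULL && _last->pc_offset == pc_offset) return _last;
        _last = full_search(pc_offset);   // slow path refills the cache
        return _last;
      }
    };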
78 78
79 79
80 80 // nmethods (native methods) are the compiled code versions of Java methods.
81 81
82 82 struct nmFlags {
83 83 friend class VMStructs;
84 84 unsigned int version:8; // version number (0 = first version)
85 85 unsigned int age:4; // age (in # of sweep steps)
86 86
  87  87     unsigned int state:2;                    // {alive, not_entrant, zombie, unloaded}
88 88
89 89 unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
90 90 unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
91 91 unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
92 92 unsigned int markedForReclamation:1; // Used by NMethodSweeper
93 93
94 94 unsigned int has_unsafe_access:1; // May fault due to unsafe access.
95 95 unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
96 96
97 97 unsigned int speculatively_disconnected:1; // Marked for potential unload
98 98
99 99 void clear();
100 100 };
101 101
102 102
 103 103 // An nmethod contains:
104 104 // - header (the nmethod structure)
105 105 // [Relocation]
106 106 // - relocation information
107 107 // - constant part (doubles, longs and floats used in nmethod)
108 +// - oop table
108 109 // [Code]
109 110 // - code body
110 111 // - exception handler
111 112 // - stub code
112 113 // [Debugging information]
113 114 // - oop array
114 115 // - data array
115 116 // - pcs
116 117 // [Exception handler table]
117 118 // - handler entry point array
118 119 // [Implicit Null Pointer exception table]
119 120 // - implicit null table array
120 121
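
Each bracketed part above is located by a single offset from the blob header; one section's end offset is simply the next section's begin, as the boundary accessors later in this file show. A compilable toy model of the scheme (the struct itself is illustrative only):

    // Toy model of the offset scheme: consecutive offsets from one base
    // pointer delimit consecutive sections, so no per-section size is stored.
    struct LayoutSketch {
      int _consts_offset;
      int _oops_offset;           // new in this change: embedded oop table
      int _scopes_data_offset;

      const char* consts_begin(const char* base) const { return base + _consts_offset; }
      const char* consts_end  (const char* base) const { return base + _oops_offset; }
      const char* oops_begin  (const char* base) const { return base + _oops_offset; }
      const char* oops_end    (const char* base) const { return base + _scopes_data_offset; }
    };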
121 122 class Dependencies;
122 123 class ExceptionHandlerTable;
123 124 class ImplicitExceptionTable;
124 125 class AbstractCompiler;
125 126 class xmlStream;
126 127
127 128 class nmethod : public CodeBlob {
128 129 friend class VMStructs;
129 130 friend class NMethodSweeper;
130 131 friend class CodeCache; // non-perm oops
131 132 private:
 132 133   // Shared fields for all nmethods
133 134 static int _zombie_instruction_size;
134 135
135 136 methodOop _method;
136 137 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
137 138
138 139 // To support simple linked-list chaining of nmethods:
139 140 nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
140 141 nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
141 142 nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
142 143
143 144 static nmethod* volatile _oops_do_mark_nmethods;
144 145 nmethod* volatile _oops_do_mark_link;
145 146
146 147 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
147 148
148 149 // Offsets for different nmethod parts
149 150 int _exception_offset;
 150 151   // All deoptees will resume execution at the location described
 151 152   // by this offset.
152 153 int _deoptimize_offset;
 153 154   // All deoptees at a MethodHandle call site will resume execution
 154 155   // at the location described by this offset.
155 156 int _deoptimize_mh_offset;
156 157 // Offset of the unwind handler if it exists
157 158 int _unwind_handler_offset;
158 159
159 160 #ifdef HAVE_DTRACE_H
160 161 int _trap_offset;
161 162 #endif // def HAVE_DTRACE_H
162 163 int _stub_offset;
163 164 int _consts_offset;
165 + int _oops_offset; // offset to where embedded oop table begins (inside data)
164 166 int _scopes_data_offset;
165 167 int _scopes_pcs_offset;
166 168 int _dependencies_offset;
167 169 int _handler_table_offset;
168 170 int _nul_chk_table_offset;
169 171 int _nmethod_end_offset;
170 172
 171 173   // Location in the frame (an offset from sp) where deopt can store
 172 174   // the original pc during a deopt.
173 175 int _orig_pc_offset;
174 176
175 177 int _compile_id; // which compilation made this nmethod
176 178 int _comp_level; // compilation level
177 179
178 180 // offsets for entry points
179 181 address _entry_point; // entry point with class check
180 182 address _verified_entry_point; // entry point without class check
181 183 address _osr_entry_point; // entry point for on stack replacement
182 184
183 185 nmFlags flags; // various flags to keep track of nmethod state
184 186 bool _markedForDeoptimization; // Used for stack deoptimization
185 187 enum { alive = 0,
186 188 not_entrant = 1, // uncommon trap has happened but activations may still exist
187 189 zombie = 2,
188 190 unloaded = 3 };
189 191
190 192 // used by jvmti to track if an unload event has been posted for this nmethod.
191 193 bool _unload_reported;
192 194
193 195 jbyte _scavenge_root_state;
194 196
195 197 NOT_PRODUCT(bool _has_debug_info; )
196 198
197 199 // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
198 200 jint _lock_count;
199 201
 200 202   // not_entrant method removal. Each mark_sweep pass will update
 201 203   // this mark to the current sweep invocation count if it is seen on
 202 204   // the stack. A not_entrant method can be removed when there are no
 203 205   // more activations, i.e., when the _stack_traversal_mark is less than
 204 206   // the current sweep traversal index.
205 207 long _stack_traversal_mark;
206 208
207 209 ExceptionCache *_exception_cache;
208 210 PcDescCache _pc_desc_cache;
209 211
210 212 // These are only used for compiled synchronized native methods to
211 213 // locate the owner and stack slot for the BasicLock so that we can
212 214 // properly revoke the bias of the owner if necessary. They are
213 215 // needed because there is no debug information for compiled native
214 216 // wrappers and the oop maps are insufficient to allow
215 217 // frame::retrieve_receiver() to work. Currently they are expected
216 218 // to be byte offsets from the Java stack pointer for maximum code
217 219 // sharing between platforms. Note that currently biased locking
218 220 // will never cause Class instances to be biased but this code
219 221 // handles the static synchronized case as well.
220 222 ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
221 223 ByteSize _compiled_synchronized_native_basic_lock_sp_offset;
222 224
223 225 friend class nmethodLocker;
224 226
225 227 // For native wrappers
226 228 nmethod(methodOop method,
227 229 int nmethod_size,
228 230 CodeOffsets* offsets,
229 231 CodeBuffer *code_buffer,
230 232 int frame_size,
231 233 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
232 234 ByteSize basic_lock_sp_offset, /* synchronized natives only */
233 235 OopMapSet* oop_maps);
234 236
235 237 #ifdef HAVE_DTRACE_H
236 238 // For native wrappers
237 239 nmethod(methodOop method,
238 240 int nmethod_size,
239 241 CodeOffsets* offsets,
240 242 CodeBuffer *code_buffer,
241 243 int frame_size);
242 244 #endif // def HAVE_DTRACE_H
243 245
244 246 // Creation support
245 247 nmethod(methodOop method,
246 248 int nmethod_size,
247 249 int compile_id,
248 250 int entry_bci,
249 251 CodeOffsets* offsets,
250 252 int orig_pc_offset,
251 253 DebugInformationRecorder *recorder,
252 254 Dependencies* dependencies,
253 255 CodeBuffer *code_buffer,
254 256 int frame_size,
255 257 OopMapSet* oop_maps,
256 258 ExceptionHandlerTable* handler_table,
257 259 ImplicitExceptionTable* nul_chk_table,
258 260 AbstractCompiler* compiler,
259 261 int comp_level);
260 262
261 263 // helper methods
262 264 void* operator new(size_t size, int nmethod_size);
263 265
264 266 const char* reloc_string_for(u_char* begin, u_char* end);
265 267 // Returns true if this thread changed the state of the nmethod or
266 268 // false if another thread performed the transition.
267 269 bool make_not_entrant_or_zombie(unsigned int state);
268 270 void inc_decompile_count();
269 271
270 272 // used to check that writes to nmFlags are done consistently.
271 273 static void check_safepoint() PRODUCT_RETURN;
272 274
273 275 // Used to manipulate the exception cache
274 276 void add_exception_cache_entry(ExceptionCache* new_entry);
275 277 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
276 278
277 279 // Inform external interfaces that a compiled method has been unloaded
278 280 inline void post_compiled_method_unload();
279 281
280 282 public:
281 283 // create nmethod with entry_bci
282 284 static nmethod* new_nmethod(methodHandle method,
283 285 int compile_id,
284 286 int entry_bci,
285 287 CodeOffsets* offsets,
286 288 int orig_pc_offset,
287 289 DebugInformationRecorder* recorder,
288 290 Dependencies* dependencies,
289 291 CodeBuffer *code_buffer,
290 292 int frame_size,
291 293 OopMapSet* oop_maps,
292 294 ExceptionHandlerTable* handler_table,
293 295 ImplicitExceptionTable* nul_chk_table,
294 296 AbstractCompiler* compiler,
295 297 int comp_level);
296 298
297 299 static nmethod* new_native_nmethod(methodHandle method,
298 300 CodeBuffer *code_buffer,
299 301 int vep_offset,
300 302 int frame_complete,
301 303 int frame_size,
302 304 ByteSize receiver_sp_offset,
303 305 ByteSize basic_lock_sp_offset,
304 306 OopMapSet* oop_maps);
305 307
306 308 #ifdef HAVE_DTRACE_H
 307 309   // The method we generate for a dtrace probe has to look
 308 310   // like an nmethod as far as the rest of the system is
 309 311   // concerned, which is somewhat unfortunate.
310 312 static nmethod* new_dtrace_nmethod(methodHandle method,
311 313 CodeBuffer *code_buffer,
312 314 int vep_offset,
313 315 int trap_offset,
314 316 int frame_complete,
315 317 int frame_size);
316 318
317 319 int trap_offset() const { return _trap_offset; }
318 320 address trap_address() const { return code_begin() + _trap_offset; }
319 321
320 322 #endif // def HAVE_DTRACE_H
321 323
322 324 // accessors
323 325 methodOop method() const { return _method; }
324 326 AbstractCompiler* compiler() const { return _compiler; }
325 327
326 328 #ifndef PRODUCT
327 329 bool has_debug_info() const { return _has_debug_info; }
 328 330   void set_has_debug_info(bool f)                 { _has_debug_info = f; }
329 331 #endif // NOT PRODUCT
330 332
331 333 // type info
332 334 bool is_nmethod() const { return true; }
333 335 bool is_java_method() const { return !method()->is_native(); }
334 336 bool is_native_method() const { return method()->is_native(); }
335 337 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
336 338
337 339 bool is_compiled_by_c1() const;
338 340 bool is_compiled_by_c2() const;
339 341
340 342 // boundaries for different parts
341 343 address code_begin () const { return _entry_point; }
342 344 address code_end () const { return header_begin() + _stub_offset ; }
343 345 address exception_begin () const { return header_begin() + _exception_offset ; }
344 346 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
345 347 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
346 348 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
347 349 address stub_begin () const { return header_begin() + _stub_offset ; }
348 350 address stub_end () const { return header_begin() + _consts_offset ; }
349 351 address consts_begin () const { return header_begin() + _consts_offset ; }
350 - address consts_end () const { return header_begin() + _scopes_data_offset ; }
352 + address consts_end () const { return header_begin() + _oops_offset ; }
353 + oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
354 + oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
355 +
351 356 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
352 357 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
353 358 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
354 359 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
355 360 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
356 361 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
357 362 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
358 363 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
359 364 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
360 365 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
361 366
362 - int code_size () const { return code_end () - code_begin (); }
363 - int stub_size () const { return stub_end () - stub_begin (); }
364 - int consts_size () const { return consts_end () - consts_begin (); }
365 - int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
366 - int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
367 - int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
368 - int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
369 - int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
367 + // Sizes
368 + int code_size () const { return code_end () - code_begin (); }
369 + int stub_size () const { return stub_end () - stub_begin (); }
370 + int consts_size () const { return consts_end () - consts_begin (); }
371 + int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
372 + int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
373 + int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
374 + int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
375 + int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
376 + int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
370 377
371 378 int total_size () const;
372 379
380 + // Containment
373 381 bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
374 382 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
375 383 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
384 + bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
376 385 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
377 386 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
378 387 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
379 388 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
380 389
381 390 // entry points
382 391 address entry_point() const { return _entry_point; } // normal entry point
383 392 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
384 393
385 394 // flag accessing and manipulation
386 395 bool is_in_use() const { return flags.state == alive; }
387 396 bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
388 397 bool is_not_entrant() const { return flags.state == not_entrant; }
389 398 bool is_zombie() const { return flags.state == zombie; }
390 399 bool is_unloaded() const { return flags.state == unloaded; }
391 400
 392 401   // Make the nmethod not entrant. The nmethod will continue to be
393 402 // alive. It is used when an uncommon trap happens. Returns true
394 403 // if this thread changed the state of the nmethod or false if
395 404 // another thread performed the transition.
396 405 bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
397 406 bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
398 407
399 408 // used by jvmti to track if the unload event has been reported
400 409 bool unload_reported() { return _unload_reported; }
401 410 void set_unload_reported() { _unload_reported = true; }
402 411
403 412 bool is_marked_for_deoptimization() const { return _markedForDeoptimization; }
404 413 void mark_for_deoptimization() { _markedForDeoptimization = true; }
405 414
406 415 void make_unloaded(BoolObjectClosure* is_alive, oop cause);
407 416
408 417 bool has_dependencies() { return dependencies_size() != 0; }
409 418 void flush_dependencies(BoolObjectClosure* is_alive);
410 419 bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
411 420 void set_has_flushed_dependencies() {
412 421 assert(!has_flushed_dependencies(), "should only happen once");
413 422 flags.hasFlushedDependencies = 1;
414 423 }
415 424
416 425 bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
417 426 void mark_for_reclamation() { flags.markedForReclamation = 1; }
418 427 void unmark_for_reclamation() { flags.markedForReclamation = 0; }
419 428
420 429 bool has_unsafe_access() const { return flags.has_unsafe_access; }
421 430 void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
422 431
423 432 bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
424 433 void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
425 434
426 435 bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
427 436 void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
428 437
429 438 int comp_level() const { return _comp_level; }
430 439
431 440 int version() const { return flags.version; }
432 441 void set_version(int v);
433 442
443 + // Support for oops in scopes and relocs:
444 + // Note: index 0 is reserved for null.
445 + oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
446 + oop* oop_addr_at(int index) const { // for GC
447 + // relocation indexes are biased by 1 (because 0 is reserved)
 448 +       assert(index > 0 && index <= (int)(oops_end() - oops_begin()), "must be a valid non-zero index");
449 + return &oops_begin()[index - 1];
450 + }
451 +
452 + void copy_oops(GrowableArray<jobject>* oops);
453 +
454 + // Relocation support
455 +private:
456 + void fix_oop_relocations(address begin, address end, bool initialize_immediates);
457 + inline void initialize_immediate_oop(oop* dest, jobject handle);
458 +
459 +public:
460 + void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
461 + void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
462 +
463 + bool is_at_poll_return(address pc);
464 + bool is_at_poll_or_poll_return(address pc);
465 +
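
The bias-by-one convention in oop_addr_at() above is easy to get wrong, so here is a self-contained illustration with simplified types (the function name is hypothetical):

    #include <cassert>
    #include <cstddef>

    typedef void* oop_t;   // stand-in for HotSpot's oop

    // Index 0 is reserved for NULL; a valid index i maps to table slot i - 1.
    oop_t table_oop_at(oop_t* table, int length, int index) {
      if (index == 0) return NULL;            // reserved null index
      assert(index > 0 && index <= length);   // biased bounds check
      return table[index - 1];
    }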
434 466 // Non-perm oop support
435 467 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
436 468 protected:
437 469 enum { npl_on_list = 0x01, npl_marked = 0x10 };
438 470 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
439 471 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
440 472 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
441 473 #ifndef PRODUCT
442 474 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
443 475 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
444 476 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
445 477 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
446 478 #endif //PRODUCT
447 479 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
448 480 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
449 481
450 482 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
451 483 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
452 484
453 485 public:
454 486
455 487 // Sweeper support
456 488 long stack_traversal_mark() { return _stack_traversal_mark; }
457 489 void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
458 490
459 491 // Exception cache support
460 492 ExceptionCache* exception_cache() const { return _exception_cache; }
461 493 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
462 494 address handler_for_exception_and_pc(Handle exception, address pc);
463 495 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
464 496 void remove_from_exception_cache(ExceptionCache* ec);
465 497
466 498 // implicit exceptions support
467 499 address continuation_for_implicit_exception(address pc);
468 500
469 501 // On-stack replacement support
470 502 int osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
471 503 address osr_entry() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
472 504 void invalidate_osr_method();
473 505 nmethod* osr_link() const { return _osr_link; }
474 506 void set_osr_link(nmethod *n) { _osr_link = n; }
475 507
476 508 // tells whether frames described by this nmethod can be deoptimized
477 509 // note: native wrappers cannot be deoptimized.
478 510 bool can_be_deoptimized() const { return is_java_method(); }
479 511
480 512 // Inline cache support
481 513 void clear_inline_caches();
482 514 void cleanup_inline_caches();
483 515 bool inlinecache_check_contains(address addr) const {
484 516 return (addr >= instructions_begin() && addr < verified_entry_point());
485 517 }
486 518
487 519 // unlink and deallocate this nmethod
488 520 // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
489 521 // expected to use any other private methods/data in this class.
490 522
491 523 protected:
492 524 void flush();
493 525
494 526 public:
495 527 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
496 528 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
 497 529   bool is_locked_by_vm() const                    { return _lock_count > 0; }
498 530
 499 531   // See comment at definition of _stack_traversal_mark
500 532 void mark_as_seen_on_stack();
501 533 bool can_not_entrant_be_converted();
502 534
503 535 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
504 536 void set_method(methodOop method) { _method = method; }
505 537
506 538 // GC support
507 539 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
508 540 bool unloading_occurred);
509 541 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
510 542 oop* root, bool unloading_occurred);
511 543
512 544 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
513 545 OopClosure* f);
514 - virtual void oops_do(OopClosure* f) { oops_do(f, false); }
515 - void oops_do(OopClosure* f, bool do_strong_roots_only);
546 + void oops_do(OopClosure* f) { oops_do(f, false); }
547 + void oops_do(OopClosure* f, bool do_strong_roots_only);
516 548 bool detect_scavenge_root_oops();
517 549 void verify_scavenge_root_oops() PRODUCT_RETURN;
518 550
519 551 bool test_set_oops_do_mark();
520 552 static void oops_do_marking_prologue();
521 553 static void oops_do_marking_epilogue();
522 554 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
523 555 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
524 556
525 557 // ScopeDesc for an instruction
526 558 ScopeDesc* scope_desc_at(address pc);
527 559
528 560 private:
529 561 ScopeDesc* scope_desc_in(address begin, address end);
530 562
531 563 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
532 564
533 565 PcDesc* find_pc_desc_internal(address pc, bool approximate);
534 566
535 567 PcDesc* find_pc_desc(address pc, bool approximate) {
536 568 PcDesc* desc = _pc_desc_cache.last_pc_desc();
537 569 if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
538 570 return desc;
539 571 }
540 572 return find_pc_desc_internal(pc, approximate);
541 573 }
542 574
543 575 public:
544 576 // ScopeDesc retrieval operation
545 577 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
 546 578   // pc_desc_near returns the first PcDesc at or after the given pc.
547 579 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
548 580
549 581 public:
550 582 // copying of debugging information
551 583 void copy_scopes_pcs(PcDesc* pcs, int count);
552 584 void copy_scopes_data(address buffer, int size);
553 585
554 586 // Deopt
 555 587   // Returns true if the PC is one that would be expected if the frame is being deopted.
556 588 bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
557 589 bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); }
558 590 bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
559 591 // Accessor/mutator for the original pc of a frame before a frame was deopted.
560 592 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
561 593 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
562 594
563 595 static address get_deopt_original_pc(const frame* fr);
564 596
565 597 // MethodHandle
566 598 bool is_method_handle_return(address return_pc);
567 599
568 600 // jvmti support:
569 601 void post_compiled_method_load_event();
570 602
571 603 // verify operations
572 604 void verify();
573 605 void verify_scopes();
574 606 void verify_interrupt_point(address interrupt_point);
575 607
576 608 // printing support
577 609 void print() const;
578 610 void print_code();
579 611 void print_relocations() PRODUCT_RETURN;
580 612 void print_pcs() PRODUCT_RETURN;
581 613 void print_scopes() PRODUCT_RETURN;
582 614 void print_dependencies() PRODUCT_RETURN;
583 615 void print_value_on(outputStream* st) const PRODUCT_RETURN;
584 616 void print_calls(outputStream* st) PRODUCT_RETURN;
585 617 void print_handler_table() PRODUCT_RETURN;
586 618 void print_nul_chk_table() PRODUCT_RETURN;
587 619 void print_nmethod(bool print_code);
588 620
589 621 void print_on(outputStream* st, const char* title) const;
590 622
591 623 // Logging
592 624 void log_identity(xmlStream* log) const;
593 625 void log_new_nmethod() const;
594 626 void log_state_change() const;
595 627
596 628 // Prints block-level comments, including nmethod specific block labels:
597 629 virtual void print_block_comment(outputStream* stream, address block_begin) {
598 630 print_nmethod_labels(stream, block_begin);
599 631 CodeBlob::print_block_comment(stream, block_begin);
600 632 }
601 633 void print_nmethod_labels(outputStream* stream, address block_begin);
602 634
603 635 // Prints a comment for one native instruction (reloc info, pc desc)
604 636 void print_code_comment_on(outputStream* st, int column, address begin, address end);
605 637 static void print_statistics() PRODUCT_RETURN;
606 638
607 639 // Compiler task identification. Note that all OSR methods
608 640 // are numbered in an independent sequence if CICountOSR is true,
609 641 // and native method wrappers are also numbered independently if
610 642 // CICountNative is true.
611 643 int compile_id() const { return _compile_id; }
612 644 const char* compile_kind() const;
613 645
614 646 // For debugging
615 647 // CompiledIC* IC_at(char* p) const;
616 648 // PrimitiveIC* primitiveIC_at(char* p) const;
617 649 oop embeddedOop_at(address p);
618 650
619 651 // tells if any of this method's dependencies have been invalidated
620 652 // (this is expensive!)
621 653 bool check_all_dependencies();
622 654
623 655 // tells if this compiled method is dependent on the given changes,
624 656 // and the changes have invalidated it
625 657 bool check_dependency_on(DepChange& changes);
626 658
627 659 // Evolution support. Tells if this compiled method is dependent on any of
628 660 // methods m() of class dependee, such that if m() in dependee is replaced,
629 661 // this compiled method will have to be deoptimized.
630 662 bool is_evol_dependent_on(klassOop dependee);
631 663
632 664 // Fast breakpoint support. Tells if this compiled method is
633 665 // dependent on the given method. Returns true if this nmethod
634 666 // corresponds to the given method as well.
635 667 bool is_dependent_on_method(methodOop dependee);
636 668
637 669 // is it ok to patch at address?
638 670 bool is_patchable_at(address instr_address);
639 671
640 672 // UseBiasedLocking support
641 673 ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
642 674 return _compiled_synchronized_native_basic_lock_owner_sp_offset;
643 675 }
644 676 ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
645 677 return _compiled_synchronized_native_basic_lock_sp_offset;
646 678 }
647 679
648 680 // support for code generation
649 681 static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
650 682 static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
651 683 static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); }
652 684
653 685 };
654 686
655 687 // Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
656 688 class nmethodLocker : public StackObj {
657 689 nmethod* _nm;
658 690
659 691 static void lock_nmethod(nmethod* nm); // note: nm can be NULL
660 692 static void unlock_nmethod(nmethod* nm); // (ditto)
661 693
662 694 public:
663 695 nmethodLocker(address pc); // derive nm from pc
664 696 nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
665 697 nmethodLocker() { _nm = NULL; }
666 698 ~nmethodLocker() { unlock_nmethod(_nm); }
667 699
668 700 nmethod* code() { return _nm; }
669 701 void set_code(nmethod* new_nm) {
670 702 unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
671 703 _nm = new_nm;
672 704 lock_nmethod(_nm);
673 705 }
674 706 };
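
Typical use of nmethodLocker is as a scoped guard; schematically (the surrounding function is hypothetical, but the constructor and code() accessor are the ones declared above):

    // Schematic use of nmethodLocker's RAII pattern: the constructor resolves
    // and locks the nmethod containing pc; the destructor unlocks it.
    void use_code_at(address pc) {
      nmethodLocker nml(pc);
      nmethod* nm = nml.code();
      if (nm != NULL) {
        // ... nm cannot be flushed while nml is in scope ...
      }
    }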