--- old/src/share/vm/code/nmethod.hpp
+++ new/src/share/vm/code/nmethod.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_CODE_NMETHOD_HPP
26 26 #define SHARE_VM_CODE_NMETHOD_HPP
27 27
28 28 #include "code/codeBlob.hpp"
29 29 #include "code/pcDesc.hpp"
30 30
31 31 // This class is used internally by nmethods, to cache
32 32 // exception/pc/handler information.
33 33
34 34 class ExceptionCache : public CHeapObj {
35 35 friend class VMStructs;
36 36 private:
37 - static address _unwind_handler;
38 37 enum { cache_size = 16 };
39 38 klassOop _exception_type;
40 39 address _pc[cache_size];
41 40 address _handler[cache_size];
42 41 int _count;
43 42 ExceptionCache* _next;
44 43
45 44 address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
46 45 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
47 46 address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
48 47 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
49 48 int count() { return _count; }
50 49 void increment_count() { _count++; }
51 50
52 51 public:
53 52
54 53 ExceptionCache(Handle exception, address pc, address handler);
55 54
56 55 klassOop exception_type() { return _exception_type; }
57 56 klassOop* exception_type_addr() { return &_exception_type; }
58 57 ExceptionCache* next() { return _next; }
59 58 void set_next(ExceptionCache *ec) { _next = ec; }
60 59
61 60 address match(Handle exception, address pc);
62 61 bool match_exception_with_space(Handle exception) ;
63 62 address test_address(address addr);
64 63 bool add_address_and_handler(address addr, address handler) ;
65 -
66 - static address unwind_handler() { return _unwind_handler; }
67 64 };
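// A minimal lookup sketch (illustrative only; nm, exception and pc are assumed to
// be in scope) showing how the cache chain is typically probed before falling back
// to the slow path that computes and caches a handler:
//
//   for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);
//     if (handler != NULL)  return handler;   // cached handler for this exception/pc
//   }
//   // miss: compute the handler, then publish it via add_handler_for_exception_and_pc()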
68 65
69 66
70 67 // cache pc descs found in earlier inquiries
71 68 class PcDescCache VALUE_OBJ_CLASS_SPEC {
72 69 friend class VMStructs;
73 70 private:
74 71 enum { cache_size = 4 };
75 72 PcDesc* _last_pc_desc; // most recent pc_desc found
76 73 PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
77 74 public:
78 75 PcDescCache() { debug_only(_last_pc_desc = NULL); }
79 76 void reset_to(PcDesc* initial_pc_desc);
80 77 PcDesc* find_pc_desc(int pc_offset, bool approximate);
81 78 void add_pc_desc(PcDesc* pc_desc);
82 79 PcDesc* last_pc_desc() { return _last_pc_desc; }
83 80 };
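// Lookup sketch (illustrative; the definitions live in nmethod.cpp): callers try the
// most recent hit first, then the small ring of recently found PcDescs, and only on a
// miss search the full scopes_pcs table:
//
//   PcDesc* d = cache.last_pc_desc();
//   if (d != NULL && d->pc_offset() == pc_offset)  return d;
//   d = cache.find_pc_desc(pc_offset, approximate);   // scans _pc_descs[]; NULL on miss
//   if (d != NULL)  return d;
//   // fall through to the slower search over scopes_pcs_begin()..scopes_pcs_end()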
84 81
85 82
86 83 // nmethods (native methods) are the compiled code versions of Java methods.
87 84 //
88 85 // An nmethod contains:
89 86 // - header (the nmethod structure)
90 87 // [Relocation]
91 88 // - relocation information
92 89 // - constant part (doubles, longs and floats used in nmethod)
93 90 // - oop table
94 91 // [Code]
95 92 // - code body
96 93 // - exception handler
97 94 // - stub code
98 95 // [Debugging information]
99 96 // - oop array
100 97 // - data array
101 98 // - pcs
102 99 // [Exception handler table]
103 100 // - handler entry point array
104 101 // [Implicit Null Pointer exception table]
105 102 // - implicit null table array
106 103
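// Each part lives at header_begin() plus a stored byte offset; for example (see the
// boundary accessors below), the embedded oop table spans
//
//   (oop*) (header_begin() + _oops_offset)          // oops_begin()
//   ... up to ...
//   (oop*) (header_begin() + _scopes_data_offset)   // oops_end()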
107 104 class Dependencies;
108 105 class ExceptionHandlerTable;
109 106 class ImplicitExceptionTable;
110 107 class AbstractCompiler;
111 108 class xmlStream;
112 109
113 110 class nmethod : public CodeBlob {
114 111 friend class VMStructs;
115 112 friend class NMethodSweeper;
116 113 friend class CodeCache; // non-perm oops
117 114 private:
118 115 // Shared fields for all nmethods
119 116 methodOop _method;
120 117 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
121 118 jmethodID _jmethod_id; // Cache of method()->jmethod_id()
122 119
123 120 // To support simple linked-list chaining of nmethods:
124 121 nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
125 122 nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
126 123 nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
127 124
128 125 static nmethod* volatile _oops_do_mark_nmethods;
129 126 nmethod* volatile _oops_do_mark_link;
130 127
131 128 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
132 129
133 130 // offsets for entry points
134 131 address _entry_point; // entry point with class check
135 132 address _verified_entry_point; // entry point without class check
136 133 address _osr_entry_point; // entry point for on stack replacement
137 134
138 135 // Offsets for different nmethod parts
139 136 int _exception_offset;
140 137 // All deoptees will resume execution at the location described by
141 138 // this offset.
142 139 int _deoptimize_offset;
143 140 // All deoptees at a MethodHandle call site will resume execution
144 141 // at the location described by this offset.
145 142 int _deoptimize_mh_offset;
146 143 // Offset of the unwind handler if it exists
147 144 int _unwind_handler_offset;
148 145
149 146 #ifdef HAVE_DTRACE_H
150 147 int _trap_offset;
151 148 #endif // def HAVE_DTRACE_H
152 149 int _consts_offset;
153 150 int _stub_offset;
154 151 int _oops_offset; // offset to where embedded oop table begins (inside data)
155 152 int _scopes_data_offset;
156 153 int _scopes_pcs_offset;
157 154 int _dependencies_offset;
158 155 int _handler_table_offset;
159 156 int _nul_chk_table_offset;
160 157 int _nmethod_end_offset;
161 158
162 159 // location in frame (offset for sp) that deopt can store the original
163 160 // pc during a deopt.
164 161 int _orig_pc_offset;
165 162
166 163 int _compile_id; // which compilation made this nmethod
167 164 int _comp_level; // compilation level
168 165
169 166 // protected by CodeCache_lock
170 167 bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
171 168 bool _speculatively_disconnected; // Marked for potential unload
172 169
173 170 bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
174 171 bool _marked_for_deoptimization; // Used for stack deoptimization
175 172
176 173 // used by jvmti to track if an unload event has been posted for this nmethod.
177 174 bool _unload_reported;
178 175
179 176 // set during construction
180 177 unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
181 178 unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
182 179
183 180 // Protected by Patching_lock
184 181 unsigned char _state; // {alive, not_entrant, zombie, unloaded}
185 182
186 183 #ifdef ASSERT
187 184 bool _oops_are_stale; // indicates that it's no longer safe to access oops section
188 185 #endif
189 186
190 187 enum { alive = 0,
191 188 not_entrant = 1, // uncommon trap has happened but activations may still exist
192 189 zombie = 2,
193 190 unloaded = 3 };
194 191
195 192
196 193 jbyte _scavenge_root_state;
197 194
198 195 NOT_PRODUCT(bool _has_debug_info; )
199 196
200 197 // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
201 198 jint _lock_count;
202 199
203 200 // not_entrant method removal. Each mark_sweep pass will update
204 201 // this mark to current sweep invocation count if it is seen on the
205 202 // stack. A not_entrant method can be removed when there are no
206 203 // more activations, i.e., when the _stack_traversal_mark is less than
207 204 // the current sweep traversal index.
208 205 long _stack_traversal_mark;
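// Illustrative sweeper-side check (a sketch; the traversal counter name is
// hypothetical):
//
//   if (nm->is_not_entrant() &&
//       nm->stack_traversal_mark() < current_sweep_traversal &&
//       !nm->is_locked_by_vm()) {
//     // no activations can remain, so the nmethod may be converted to a zombie
//   }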
209 206
210 207 ExceptionCache *_exception_cache;
211 208 PcDescCache _pc_desc_cache;
212 209
213 210 // These are used for compiled synchronized native methods to
214 211 // locate the owner and stack slot for the BasicLock so that we can
215 212 // properly revoke the bias of the owner if necessary. They are
216 213 // needed because there is no debug information for compiled native
217 214 // wrappers and the oop maps are insufficient to allow
218 215 // frame::retrieve_receiver() to work. Currently they are expected
219 216 // to be byte offsets from the Java stack pointer for maximum code
220 217 // sharing between platforms. Note that currently biased locking
221 218 // will never cause Class instances to be biased but this code
222 219 // handles the static synchronized case as well.
223 220 // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
224 221 // for non-static native wrapper frames.
225 222 ByteSize _native_receiver_sp_offset;
226 223 ByteSize _native_basic_lock_sp_offset;
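// Illustrative use (a sketch, assuming fr.sp() is the wrapper frame's Java stack
// pointer): the receiver and BasicLock slots could be located roughly as
//
//   oop owner       = *(oop*) ((address) fr.sp() + in_bytes(nm->native_receiver_sp_offset()));
//   BasicLock* lock = (BasicLock*) ((address) fr.sp() + in_bytes(nm->native_basic_lock_sp_offset()));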
227 224
228 225 friend class nmethodLocker;
229 226
230 227 // For native wrappers
231 228 nmethod(methodOop method,
232 229 int nmethod_size,
233 230 CodeOffsets* offsets,
234 231 CodeBuffer *code_buffer,
235 232 int frame_size,
236 233 ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
237 234 ByteSize basic_lock_sp_offset, /* synchronized natives only */
238 235 OopMapSet* oop_maps);
239 236
240 237 #ifdef HAVE_DTRACE_H
241 238 // For native wrappers
242 239 nmethod(methodOop method,
243 240 int nmethod_size,
244 241 CodeOffsets* offsets,
245 242 CodeBuffer *code_buffer,
246 243 int frame_size);
247 244 #endif // def HAVE_DTRACE_H
248 245
249 246 // Creation support
250 247 nmethod(methodOop method,
251 248 int nmethod_size,
252 249 int compile_id,
253 250 int entry_bci,
254 251 CodeOffsets* offsets,
255 252 int orig_pc_offset,
256 253 DebugInformationRecorder *recorder,
257 254 Dependencies* dependencies,
258 255 CodeBuffer *code_buffer,
259 256 int frame_size,
260 257 OopMapSet* oop_maps,
261 258 ExceptionHandlerTable* handler_table,
262 259 ImplicitExceptionTable* nul_chk_table,
263 260 AbstractCompiler* compiler,
264 261 int comp_level);
265 262
266 263 // helper methods
267 264 void* operator new(size_t size, int nmethod_size);
268 265
269 266 const char* reloc_string_for(u_char* begin, u_char* end);
270 267 // Returns true if this thread changed the state of the nmethod or
271 268 // false if another thread performed the transition.
272 269 bool make_not_entrant_or_zombie(unsigned int state);
273 270 void inc_decompile_count();
274 271
275 272 // Used to manipulate the exception cache
276 273 void add_exception_cache_entry(ExceptionCache* new_entry);
277 274 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
278 275
279 276 // Inform external interfaces that a compiled method has been unloaded
280 277 void post_compiled_method_unload();
281 278
282 279 // Initialize fields to their default values
283 280 void init_defaults();
284 281
285 282 public:
286 283 // create nmethod with entry_bci
287 284 static nmethod* new_nmethod(methodHandle method,
288 285 int compile_id,
289 286 int entry_bci,
290 287 CodeOffsets* offsets,
291 288 int orig_pc_offset,
292 289 DebugInformationRecorder* recorder,
293 290 Dependencies* dependencies,
294 291 CodeBuffer *code_buffer,
295 292 int frame_size,
296 293 OopMapSet* oop_maps,
297 294 ExceptionHandlerTable* handler_table,
298 295 ImplicitExceptionTable* nul_chk_table,
299 296 AbstractCompiler* compiler,
300 297 int comp_level);
301 298
302 299 static nmethod* new_native_nmethod(methodHandle method,
303 300 CodeBuffer *code_buffer,
304 301 int vep_offset,
305 302 int frame_complete,
306 303 int frame_size,
307 304 ByteSize receiver_sp_offset,
308 305 ByteSize basic_lock_sp_offset,
309 306 OopMapSet* oop_maps);
310 307
311 308 #ifdef HAVE_DTRACE_H
312 309 // The method we generate for a dtrace probe has to look
313 310 // like an nmethod as far as the rest of the system is concerned,
314 311 // which is somewhat unfortunate.
315 312 static nmethod* new_dtrace_nmethod(methodHandle method,
316 313 CodeBuffer *code_buffer,
317 314 int vep_offset,
318 315 int trap_offset,
319 316 int frame_complete,
320 317 int frame_size);
321 318
322 319 int trap_offset() const { return _trap_offset; }
323 320 address trap_address() const { return insts_begin() + _trap_offset; }
324 321
325 322 #endif // def HAVE_DTRACE_H
326 323
327 324 // accessors
328 325 methodOop method() const { return _method; }
329 326 AbstractCompiler* compiler() const { return _compiler; }
330 327
331 328 #ifndef PRODUCT
332 329 bool has_debug_info() const { return _has_debug_info; }
333 330 void set_has_debug_info(bool f) { _has_debug_info = false; }
334 331 #endif // NOT PRODUCT
335 332
336 333 // type info
337 334 bool is_nmethod() const { return true; }
338 335 bool is_java_method() const { return !method()->is_native(); }
339 336 bool is_native_method() const { return method()->is_native(); }
340 337 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
341 338
342 339 bool is_compiled_by_c1() const;
343 340 bool is_compiled_by_c2() const;
344 341 bool is_compiled_by_shark() const;
345 342
346 343 // boundaries for different parts
347 344 address consts_begin () const { return header_begin() + _consts_offset ; }
348 345 address consts_end () const { return header_begin() + code_offset() ; }
349 346 address insts_begin () const { return header_begin() + code_offset() ; }
350 347 address insts_end () const { return header_begin() + _stub_offset ; }
351 348 address stub_begin () const { return header_begin() + _stub_offset ; }
352 349 address stub_end () const { return header_begin() + _oops_offset ; }
353 350 address exception_begin () const { return header_begin() + _exception_offset ; }
354 351 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
355 352 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
356 353 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
357 354 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
358 355 oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
359 356
360 357 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
361 358 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
362 359 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
363 360 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
364 361 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
365 362 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
366 363 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
367 364 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
368 365 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
369 366 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
370 367
371 368 // Sizes
372 369 int consts_size () const { return consts_end () - consts_begin (); }
373 370 int insts_size () const { return insts_end () - insts_begin (); }
374 371 int stub_size () const { return stub_end () - stub_begin (); }
375 372 int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
376 373 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
377 374 int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
378 375 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
379 376 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
380 377 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
381 378
382 379 int total_size () const;
383 380
384 381 // Containment
385 382 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
386 383 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
387 384 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
388 385 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
389 386 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
390 387 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
391 388 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
392 389 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
393 390
394 391 // entry points
395 392 address entry_point() const { return _entry_point; } // normal entry point
396 393 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
397 394
398 395 // flag accessing and manipulation
399 396 bool is_in_use() const { return _state == alive; }
400 397 bool is_alive() const { return _state == alive || _state == not_entrant; }
401 398 bool is_not_entrant() const { return _state == not_entrant; }
402 399 bool is_zombie() const { return _state == zombie; }
403 400 bool is_unloaded() const { return _state == unloaded; }
404 401
405 402 // Make the nmethod non entrant. The nmethod will continue to be
406 403 // alive. It is used when an uncommon trap happens. Returns true
407 404 // if this thread changed the state of the nmethod or false if
408 405 // another thread performed the transition.
409 406 bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
410 407 bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
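// Typical flow (illustrative): after an uncommon trap the runtime might call
//
//   if (nm->is_in_use())  nm->make_not_entrant();   // existing activations stay alive
//
// and the sweeper later calls nm->make_zombie() once no activations remain on any stack.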
411 408
412 409 // used by jvmti to track if the unload event has been reported
413 410 bool unload_reported() { return _unload_reported; }
414 411 void set_unload_reported() { _unload_reported = true; }
415 412
416 413 bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
417 414 void mark_for_deoptimization() { _marked_for_deoptimization = true; }
418 415
419 416 void make_unloaded(BoolObjectClosure* is_alive, oop cause);
420 417
421 418 bool has_dependencies() { return dependencies_size() != 0; }
422 419 void flush_dependencies(BoolObjectClosure* is_alive);
423 420 bool has_flushed_dependencies() { return _has_flushed_dependencies; }
424 421 void set_has_flushed_dependencies() {
425 422 assert(!has_flushed_dependencies(), "should only happen once");
426 423 _has_flushed_dependencies = 1;
427 424 }
428 425
429 426 bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
430 427 void mark_for_reclamation() { _marked_for_reclamation = 1; }
431 428
432 429 bool has_unsafe_access() const { return _has_unsafe_access; }
433 430 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
434 431
435 432 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
436 433 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
437 434
438 435 bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
439 436 void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
440 437
441 438 int comp_level() const { return _comp_level; }
442 439
443 440 // Support for oops in scopes and relocs:
444 441 // Note: index 0 is reserved for null.
445 442 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
446 443 oop* oop_addr_at(int index) const { // for GC
447 444 // relocation indexes are biased by 1 (because 0 is reserved)
448 445 assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
449 446 assert(!_oops_are_stale, "oops are stale");
450 447 return &oops_begin()[index - 1];
451 448 }
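// Example (illustrative): because index 0 is reserved for null, oop_at(1) yields the
// first embedded oop, i.e. oops_begin()[0]:
//
//   oop first = nm->oop_at(1);   // == *nm->oop_addr_at(1)
//   oop none  = nm->oop_at(0);   // always NULL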
452 449
453 450 void copy_oops(GrowableArray<jobject>* oops);
454 451
455 452 // Relocation support
456 453 private:
457 454 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
458 455 inline void initialize_immediate_oop(oop* dest, jobject handle);
459 456
460 457 public:
461 458 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
462 459 void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
463 460
464 461 bool is_at_poll_return(address pc);
465 462 bool is_at_poll_or_poll_return(address pc);
466 463
467 464 // Non-perm oop support
468 465 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
469 466 protected:
470 467 enum { npl_on_list = 0x01, npl_marked = 0x10 };
471 468 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
472 469 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
473 470 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
474 471 #ifndef PRODUCT
475 472 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
476 473 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
477 474 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
478 475 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
479 476 #endif //PRODUCT
480 477 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
481 478 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
482 479
483 480 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
484 481 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
485 482
486 483 public:
487 484
488 485 // Sweeper support
489 486 long stack_traversal_mark() { return _stack_traversal_mark; }
490 487 void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
491 488
492 489 // Exception cache support
493 490 ExceptionCache* exception_cache() const { return _exception_cache; }
494 491 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
495 492 address handler_for_exception_and_pc(Handle exception, address pc);
496 493 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
497 494 void remove_from_exception_cache(ExceptionCache* ec);
498 495
499 496 // implicit exceptions support
500 497 address continuation_for_implicit_exception(address pc);
501 498
502 499 // On-stack replacement support
503 500 int osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
504 501 address osr_entry() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
505 502 void invalidate_osr_method();
506 503 nmethod* osr_link() const { return _osr_link; }
507 504 void set_osr_link(nmethod *n) { _osr_link = n; }
508 505
509 506 // tells whether frames described by this nmethod can be deoptimized
510 507 // note: native wrappers cannot be deoptimized.
511 508 bool can_be_deoptimized() const { return is_java_method(); }
512 509
513 510 // Inline cache support
514 511 void clear_inline_caches();
515 512 void cleanup_inline_caches();
516 513 bool inlinecache_check_contains(address addr) const {
517 514 return (addr >= code_begin() && addr < verified_entry_point());
518 515 }
519 516
520 517 // unlink and deallocate this nmethod
521 518 // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
522 519 // expected to use any other private methods/data in this class.
523 520
524 521 protected:
525 522 void flush();
526 523
527 524 public:
528 525 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
529 526 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
530 527 bool is_locked_by_vm() const { return _lock_count > 0; }
531 528
532 529 // See comment at definition of _last_seen_on_stack
533 530 void mark_as_seen_on_stack();
534 531 bool can_not_entrant_be_converted();
535 532
536 533 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
537 534 void set_method(methodOop method) { _method = method; }
538 535
539 536 // GC support
540 537 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
541 538 bool unloading_occurred);
542 539 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
543 540 oop* root, bool unloading_occurred);
544 541
545 542 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
546 543 OopClosure* f);
547 544 void oops_do(OopClosure* f) { oops_do(f, false); }
548 545 void oops_do(OopClosure* f, bool do_strong_roots_only);
549 546 bool detect_scavenge_root_oops();
550 547 void verify_scavenge_root_oops() PRODUCT_RETURN;
551 548
552 549 bool test_set_oops_do_mark();
553 550 static void oops_do_marking_prologue();
554 551 static void oops_do_marking_epilogue();
555 552 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
556 553 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
557 554
558 555 // ScopeDesc for an instruction
559 556 ScopeDesc* scope_desc_at(address pc);
560 557
561 558 private:
562 559 ScopeDesc* scope_desc_in(address begin, address end);
563 560
564 561 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
565 562
566 563 PcDesc* find_pc_desc_internal(address pc, bool approximate);
567 564
568 565 PcDesc* find_pc_desc(address pc, bool approximate) {
569 566 PcDesc* desc = _pc_desc_cache.last_pc_desc();
570 567 if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
571 568 return desc;
572 569 }
573 570 return find_pc_desc_internal(pc, approximate);
574 571 }
575 572
576 573 public:
577 574 // ScopeDesc retrieval operation
578 575 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
579 576 // pc_desc_near returns the first PcDesc at or after the given pc.
580 577 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
581 578
582 579 public:
583 580 // copying of debugging information
584 581 void copy_scopes_pcs(PcDesc* pcs, int count);
585 582 void copy_scopes_data(address buffer, int size);
586 583
587 584 // Deopt
588 585 // Returns true if the PC is one we would expect if the frame is being deopted.
589 586 bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
590 587 bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); }
591 588 bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
592 589 // Accessor/mutator for the original pc of a frame before a frame was deopted.
593 590 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
594 591 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
595 592
596 593 static address get_deopt_original_pc(const frame* fr);
597 594
598 595 // MethodHandle
599 596 bool is_method_handle_return(address return_pc);
600 597
601 598 // jvmti support:
602 599 void post_compiled_method_load_event();
603 600 jmethodID get_and_cache_jmethod_id();
604 601
605 602 // verify operations
606 603 void verify();
607 604 void verify_scopes();
608 605 void verify_interrupt_point(address interrupt_point);
609 606
610 607 // print compilation helper
611 608 static void print_compilation(outputStream *st, const char *method_name, const char *title,
612 609 methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
613 610
614 611 // printing support
615 612 void print() const;
616 613 void print_code();
617 614 void print_relocations() PRODUCT_RETURN;
618 615 void print_pcs() PRODUCT_RETURN;
619 616 void print_scopes() PRODUCT_RETURN;
620 617 void print_dependencies() PRODUCT_RETURN;
621 618 void print_value_on(outputStream* st) const PRODUCT_RETURN;
622 619 void print_calls(outputStream* st) PRODUCT_RETURN;
623 620 void print_handler_table() PRODUCT_RETURN;
624 621 void print_nul_chk_table() PRODUCT_RETURN;
625 622 void print_nmethod(bool print_code);
626 623
627 624 // need to re-define this from CodeBlob else the overload hides it
628 625 virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
629 626 void print_on(outputStream* st, const char* title) const;
630 627
631 628 // Logging
632 629 void log_identity(xmlStream* log) const;
633 630 void log_new_nmethod() const;
634 631 void log_state_change() const;
635 632
636 633 // Prints block-level comments, including nmethod specific block labels:
637 634 virtual void print_block_comment(outputStream* stream, address block_begin) {
638 635 print_nmethod_labels(stream, block_begin);
639 636 CodeBlob::print_block_comment(stream, block_begin);
640 637 }
641 638 void print_nmethod_labels(outputStream* stream, address block_begin);
642 639
643 640 // Prints a comment for one native instruction (reloc info, pc desc)
644 641 void print_code_comment_on(outputStream* st, int column, address begin, address end);
645 642 static void print_statistics() PRODUCT_RETURN;
646 643
647 644 // Compiler task identification. Note that all OSR methods
648 645 // are numbered in an independent sequence if CICountOSR is true,
649 646 // and native method wrappers are also numbered independently if
650 647 // CICountNative is true.
651 648 int compile_id() const { return _compile_id; }
652 649 const char* compile_kind() const;
653 650
654 651 // For debugging
655 652 // CompiledIC* IC_at(char* p) const;
656 653 // PrimitiveIC* primitiveIC_at(char* p) const;
657 654 oop embeddedOop_at(address p);
658 655
659 656 // tells if any of this method's dependencies have been invalidated
660 657 // (this is expensive!)
661 658 bool check_all_dependencies();
662 659
663 660 // tells if this compiled method is dependent on the given changes,
664 661 // and the changes have invalidated it
665 662 bool check_dependency_on(DepChange& changes);
666 663
667 664 // Evolution support. Tells if this compiled method is dependent on any of
668 665 // methods m() of class dependee, such that if m() in dependee is replaced,
669 666 // this compiled method will have to be deoptimized.
670 667 bool is_evol_dependent_on(klassOop dependee);
671 668
672 669 // Fast breakpoint support. Tells if this compiled method is
673 670 // dependent on the given method. Returns true if this nmethod
674 671 // corresponds to the given method as well.
675 672 bool is_dependent_on_method(methodOop dependee);
676 673
677 674 // is it ok to patch at address?
678 675 bool is_patchable_at(address instr_address);
679 676
680 677 // UseBiasedLocking support
681 678 ByteSize native_receiver_sp_offset() {
682 679 return _native_receiver_sp_offset;
683 680 }
684 681 ByteSize native_basic_lock_sp_offset() {
685 682 return _native_basic_lock_sp_offset;
686 683 }
687 684
688 685 // support for code generation
689 686 static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
690 687 static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
691 688 static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); }
692 689
693 690 };
694 691
695 692 // Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
696 693 class nmethodLocker : public StackObj {
697 694 nmethod* _nm;
698 695
699 696 public:
700 697
701 698 static void lock_nmethod(nmethod* nm); // note: nm can be NULL
702 699 static void unlock_nmethod(nmethod* nm); // (ditto)
703 700
704 701 nmethodLocker(address pc); // derive nm from pc
705 702 nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
706 703 nmethodLocker() { _nm = NULL; }
707 704 ~nmethodLocker() { unlock_nmethod(_nm); }
708 705
709 706 nmethod* code() { return _nm; }
710 707 void set_code(nmethod* new_nm) {
711 708 unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
712 709 _nm = new_nm;
713 710 lock_nmethod(_nm);
714 711 }
715 712 };
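// Typical RAII use (illustrative sketch):
//
//   {
//     nmethodLocker nl(pc);        // derives and locks the nmethod containing pc
//     nmethod* nm = nl.code();     // safe to use while nl is in scope
//     // ...
//   }                              // destructor unlocks the nmethod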
716 713
717 714 #endif // SHARE_VM_CODE_NMETHOD_HPP