88
89 unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
90 unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
91 unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
92 unsigned int markedForReclamation:1; // Used by NMethodSweeper
93
94 unsigned int has_unsafe_access:1; // May fault due to unsafe access.
95 unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
96
97 unsigned int speculatively_disconnected:1; // Marked for potential unload
98
99 void clear();
100 };
101
102
103 // A nmethod contains:
104 // - header (the nmethod structure)
105 // [Relocation]
106 // - relocation information
107 // - constant part (doubles, longs and floats used in nmethod)
108 // [Code]
109 // - code body
110 // - exception handler
111 // - stub code
112 // [Debugging information]
113 // - oop array
114 // - data array
115 // - pcs
116 // [Exception handler table]
117 // - handler entry point array
118 // [Implicit Null Pointer exception table]
119 // - implicit null table array
120
121 class Dependencies;
122 class ExceptionHandlerTable;
123 class ImplicitExceptionTable;
124 class AbstractCompiler;
125 class xmlStream;
126
127 class nmethod : public CodeBlob {
144 nmethod* volatile _oops_do_mark_link;
145
146 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
147
148 // Offsets for different nmethod parts
149 int _exception_offset;
150 // All deoptees will resume execution at the location described by
151 // this offset.
152 int _deoptimize_offset;
153 // All deoptees at a MethodHandle call site will resume execution
154 // at the location described by this offset.
155 int _deoptimize_mh_offset;
156 // Offset of the unwind handler if it exists
157 int _unwind_handler_offset;
158
159 #ifdef HAVE_DTRACE_H
160 int _trap_offset;
161 #endif // def HAVE_DTRACE_H
162 int _stub_offset;
163 int _consts_offset;
164 int _scopes_data_offset;
165 int _scopes_pcs_offset;
166 int _dependencies_offset;
167 int _handler_table_offset;
168 int _nul_chk_table_offset;
169 int _nmethod_end_offset;
170
171 // location in frame (offset for sp) that deopt can store the original
172 // pc during a deopt.
173 int _orig_pc_offset;
174
175 int _compile_id; // which compilation made this nmethod
176 int _comp_level; // compilation level
177
178 // offsets for entry points
179 address _entry_point; // entry point with class check
180 address _verified_entry_point; // entry point without class check
181 address _osr_entry_point; // entry point for on stack replacement
182
183 nmFlags flags; // various flags to keep track of nmethod state
330
331 // type info
332 bool is_nmethod() const { return true; }
333 bool is_java_method() const { return !method()->is_native(); }
334 bool is_native_method() const { return method()->is_native(); }
335 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
336
337 bool is_compiled_by_c1() const;
338 bool is_compiled_by_c2() const;
339
340 // boundaries for different parts
341 address code_begin () const { return _entry_point; }
342 address code_end () const { return header_begin() + _stub_offset ; }
343 address exception_begin () const { return header_begin() + _exception_offset ; }
344 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
345 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
346 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
347 address stub_begin () const { return header_begin() + _stub_offset ; }
348 address stub_end () const { return header_begin() + _consts_offset ; }
349 address consts_begin () const { return header_begin() + _consts_offset ; }
350 address consts_end () const { return header_begin() + _scopes_data_offset ; }
351 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
352 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
353 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
354 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
355 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
356 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
357 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
358 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
359 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
360 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
361
362 int code_size () const { return code_end () - code_begin (); }
363 int stub_size () const { return stub_end () - stub_begin (); }
364 int consts_size () const { return consts_end () - consts_begin (); }
365 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
366 int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
367 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
368 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
369 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
370
371 int total_size () const;
372
373 bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
374 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
375 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
376 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
377 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
378 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
379 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
380
381 // entry points
382 address entry_point() const { return _entry_point; } // normal entry point
383 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
384
385 // flag accessing and manipulation
386 bool is_in_use() const { return flags.state == alive; }
387 bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
388 bool is_not_entrant() const { return flags.state == not_entrant; }
389 bool is_zombie() const { return flags.state == zombie; }
390 bool is_unloaded() const { return flags.state == unloaded; }
391
392 // Make the nmethod non-entrant. The nmethod will continue to be
393 // alive. This is used when an uncommon trap happens. Returns true
394 // if this thread changed the state of the nmethod or false if
395 // another thread performed the transition.
414 }
415
416 bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
417 void mark_for_reclamation() { flags.markedForReclamation = 1; }
418 void unmark_for_reclamation() { flags.markedForReclamation = 0; }
419
420 bool has_unsafe_access() const { return flags.has_unsafe_access; }
421 void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
422
423 bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
424 void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
425
426 bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
427 void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
428
429 int comp_level() const { return _comp_level; }
430
431 int version() const { return flags.version; }
432 void set_version(int v);
433
434 // Non-perm oop support
435 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
436 protected:
437 enum { npl_on_list = 0x01, npl_marked = 0x10 };
438 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
439 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
440 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
441 #ifndef PRODUCT
442 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
443 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
444 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
445 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
446 #endif //PRODUCT
447 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
448 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
449
450 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
451 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
452
453 public:
494 public:
495 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
496 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
497 bool is_locked_by_vm() const { return _lock_count >0; }
498
499 // See comment at definition of _last_seen_on_stack
500 void mark_as_seen_on_stack();
501 bool can_not_entrant_be_converted();
502
503 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
504 void set_method(methodOop method) { _method = method; }
505
506 // GC support
507 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
508 bool unloading_occurred);
509 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
510 oop* root, bool unloading_occurred);
511
512 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
513 OopClosure* f);
514 virtual void oops_do(OopClosure* f) { oops_do(f, false); }
515 void oops_do(OopClosure* f, bool do_strong_roots_only);
516 bool detect_scavenge_root_oops();
517 void verify_scavenge_root_oops() PRODUCT_RETURN;
518
519 bool test_set_oops_do_mark();
520 static void oops_do_marking_prologue();
521 static void oops_do_marking_epilogue();
522 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
523 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
524
525 // ScopeDesc for an instruction
526 ScopeDesc* scope_desc_at(address pc);
527
528 private:
529 ScopeDesc* scope_desc_in(address begin, address end);
530
531 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
532
533 PcDesc* find_pc_desc_internal(address pc, bool approximate);
534
|
88
89 unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
90 unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
91 unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
92 unsigned int markedForReclamation:1; // Used by NMethodSweeper
93
94 unsigned int has_unsafe_access:1; // May fault due to unsafe access.
95 unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
96
97 unsigned int speculatively_disconnected:1; // Marked for potential unload
98
99 void clear();
100 };
101
102
103 // A nmethod contains:
104 // - header (the nmethod structure)
105 // [Relocation]
106 // - relocation information
107 // - constant part (doubles, longs and floats used in nmethod)
108 // - oop table
109 // [Code]
110 // - code body
111 // - exception handler
112 // - stub code
113 // [Debugging information]
114 // - oop array
115 // - data array
116 // - pcs
117 // [Exception handler table]
118 // - handler entry point array
119 // [Implicit Null Pointer exception table]
120 // - implicit null table array
121
122 class Dependencies;
123 class ExceptionHandlerTable;
124 class ImplicitExceptionTable;
125 class AbstractCompiler;
126 class xmlStream;
127
128 class nmethod : public CodeBlob {
145 nmethod* volatile _oops_do_mark_link;
146
147 AbstractCompiler* _compiler; // The compiler which compiled this nmethod
148
149 // Offsets for different nmethod parts
150 int _exception_offset;
151 // All deoptees will resume execution at the location described by
152 // this offset.
153 int _deoptimize_offset;
154 // All deoptees at a MethodHandle call site will resume execution
155 // at the location described by this offset.
156 int _deoptimize_mh_offset;
157 // Offset of the unwind handler if it exists
158 int _unwind_handler_offset;
159
160 #ifdef HAVE_DTRACE_H
161 int _trap_offset;
162 #endif // def HAVE_DTRACE_H
163 int _stub_offset;
164 int _consts_offset;
165 int _oops_offset; // offset to where embedded oop table begins (inside data)
166 int _scopes_data_offset;
167 int _scopes_pcs_offset;
168 int _dependencies_offset;
169 int _handler_table_offset;
170 int _nul_chk_table_offset;
171 int _nmethod_end_offset;
172
173 // location in frame (offset for sp) that deopt can store the original
174 // pc during a deopt.
175 int _orig_pc_offset;
176
177 int _compile_id; // which compilation made this nmethod
178 int _comp_level; // compilation level
179
180 // offsets for entry points
181 address _entry_point; // entry point with class check
182 address _verified_entry_point; // entry point without class check
183 address _osr_entry_point; // entry point for on stack replacement
184
185 nmFlags flags; // various flags to keep track of nmethod state
332
333 // type info
334 bool is_nmethod() const { return true; }
335 bool is_java_method() const { return !method()->is_native(); }
336 bool is_native_method() const { return method()->is_native(); }
337 bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
338
339 bool is_compiled_by_c1() const;
340 bool is_compiled_by_c2() const;
341
342 // boundaries for different parts
343 address code_begin () const { return _entry_point; }
344 address code_end () const { return header_begin() + _stub_offset ; }
345 address exception_begin () const { return header_begin() + _exception_offset ; }
346 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
347 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
348 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
349 address stub_begin () const { return header_begin() + _stub_offset ; }
350 address stub_end () const { return header_begin() + _consts_offset ; }
351 address consts_begin () const { return header_begin() + _consts_offset ; }
352 address consts_end () const { return header_begin() + _oops_offset ; }
353 oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
354 oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
355
356 address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
357 address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
358 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
359 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
360 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
361 address dependencies_end () const { return header_begin() + _handler_table_offset ; }
362 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
363 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
364 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
365 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
366
367 // Sizes
368 int code_size () const { return code_end () - code_begin (); }
369 int stub_size () const { return stub_end () - stub_begin (); }
370 int consts_size () const { return consts_end () - consts_begin (); }
371 int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
372 int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
373 int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
374 int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
375 int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
376 int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
377
378 int total_size () const;
379
380 // Containment
381 bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
382 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
383 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
384 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
385 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
386 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
387 bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
388 bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
389
390 // entry points
391 address entry_point() const { return _entry_point; } // normal entry point
392 address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
393
394 // flag accessing and manipulation
395 bool is_in_use() const { return flags.state == alive; }
396 bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
397 bool is_not_entrant() const { return flags.state == not_entrant; }
398 bool is_zombie() const { return flags.state == zombie; }
399 bool is_unloaded() const { return flags.state == unloaded; }
400
401 // Make the nmethod non-entrant. The nmethod will continue to be
402 // alive. This is used when an uncommon trap happens. Returns true
403 // if this thread changed the state of the nmethod or false if
404 // another thread performed the transition.
423 }
424
425 bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
426 void mark_for_reclamation() { flags.markedForReclamation = 1; }
427 void unmark_for_reclamation() { flags.markedForReclamation = 0; }
428
429 bool has_unsafe_access() const { return flags.has_unsafe_access; }
430 void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
431
432 bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
433 void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
434
435 bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
436 void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
437
438 int comp_level() const { return _comp_level; }
439
440 int version() const { return flags.version; }
441 void set_version(int v);
442
443 // Support for oops in scopes and relocs:
444 // Note: index 0 is reserved for null.
445 oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
446 oop* oop_addr_at(int index) const { // for GC
447 // relocation indexes are biased by 1 (because 0 is reserved)
448 assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
449 return &oops_begin()[index - 1];
450 }
451
452 void copy_oops(GrowableArray<jobject>* oops);
453
454 // Relocation support
455 private:
456 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
457 inline void initialize_immediate_oop(oop* dest, jobject handle);
458
459 public:
460 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
461 void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
462
463 bool is_at_poll_return(address pc);
464 bool is_at_poll_or_poll_return(address pc);
465
466 // Non-perm oop support
467 bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
468 protected:
469 enum { npl_on_list = 0x01, npl_marked = 0x10 };
470 void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
471 void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
472 // assertion-checking and pruning logic uses the bits of _scavenge_root_state
473 #ifndef PRODUCT
474 void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
475 void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
476 bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
477 // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
478 #endif //PRODUCT
479 nmethod* scavenge_root_link() const { return _scavenge_root_link; }
480 void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
481
482 nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
483 void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
484
485 public:
526 public:
527 // If returning true, it is unsafe to remove this nmethod even though it is a zombie
528 // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
529 bool is_locked_by_vm() const { return _lock_count >0; }
530
531 // See comment at definition of _last_seen_on_stack
532 void mark_as_seen_on_stack();
533 bool can_not_entrant_be_converted();
534
535 // Evolution support. We make old (discarded) compiled methods point to new methodOops.
536 void set_method(methodOop method) { _method = method; }
537
538 // GC support
539 void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
540 bool unloading_occurred);
541 bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
542 oop* root, bool unloading_occurred);
543
544 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
545 OopClosure* f);
546 void oops_do(OopClosure* f) { oops_do(f, false); }
547 void oops_do(OopClosure* f, bool do_strong_roots_only);
548 bool detect_scavenge_root_oops();
549 void verify_scavenge_root_oops() PRODUCT_RETURN;
550
551 bool test_set_oops_do_mark();
552 static void oops_do_marking_prologue();
553 static void oops_do_marking_epilogue();
554 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
555 DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
556
557 // ScopeDesc for an instruction
558 ScopeDesc* scope_desc_at(address pc);
559
560 private:
561 ScopeDesc* scope_desc_in(address begin, address end);
562
563 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
564
565 PcDesc* find_pc_desc_internal(address pc, bool approximate);
566
|