src/share/vm/code/nmethod.hpp (HotSpot JVM, C++ header — excerpt from a code-review listing)

Two revisions of the same region are shown below: the original version first,
followed by the updated version that adds G1 class-unloading support
(_global_unloading_clock, _unloading_clock, the _unloading_next union member,
and the do_unloading_parallel* entry points).
  94 //  [Debugging information]
  95 //  - oop array
  96 //  - data array
  97 //  - pcs
  98 //  [Exception handler table]
  99 //  - handler entry point array
 100 //  [Implicit Null Pointer exception table]
 101 //  - implicit null table array
 102 
 103 class Dependencies;
 104 class ExceptionHandlerTable;
 105 class ImplicitExceptionTable;
 106 class AbstractCompiler;
 107 class xmlStream;
 108 
 109 class nmethod : public CodeBlob {
 110   friend class VMStructs;
 111   friend class NMethodSweeper;
 112   friend class CodeCache;  // scavengable oops
 113  private:





 114   // Shared fields for all nmethod's
 115   Method*   _method;
 116   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
 117   jmethodID _jmethod_id;       // Cache of method()->jmethod_id()
 118 
 119   // To support simple linked-list chaining of nmethods:
 120   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head





 121   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods

 122 
 123   static nmethod* volatile _oops_do_mark_nmethods;
 124   nmethod*        volatile _oops_do_mark_link;
 125 
 126   AbstractCompiler* _compiler; // The compiler which compiled this nmethod
 127 
 128   // offsets for entry points
 129   address _entry_point;                      // entry point with class check
 130   address _verified_entry_point;             // entry point without class check
 131   address _osr_entry_point;                  // entry point for on stack replacement
 132 
 133   // Offsets for different nmethod parts
 134   int _exception_offset;
 135   // All deoptee's will resume execution at this location described by
 136   // this offset.
 137   int _deoptimize_offset;
 138   // All deoptee's at a MethodHandle call site will resume execution
 139   // at this location described by this offset.
 140   int _deoptimize_mh_offset;
 141   // Offset of the unwind handler if it exists


 163   int _comp_level;                           // compilation level
 164 
 165   // protected by CodeCache_lock
 166   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
 167 
 168   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
 169   bool _marked_for_deoptimization;           // Used for stack deoptimization
 170 
 171   // used by jvmti to track if an unload event has been posted for this nmethod.
 172   bool _unload_reported;
 173 
 174   // set during construction
 175   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 176   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
 177   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 178   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 179 
 180   // Protected by Patching_lock
 181   volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 182 


 183 #ifdef ASSERT
 184   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 185 #endif
 186 
 187   enum { in_use       = 0,   // executable nmethod
 188          not_entrant  = 1,   // marked for deoptimization but activations may still exist,
 189                              // will be transformed to zombie when all activations are gone
 190          zombie       = 2,   // no activations exist, nmethod is ready for purge
 191          unloaded     = 3 }; // there should be no activations, should not be called,
 192                              // will be transformed to zombie immediately
 193 
 194   jbyte _scavenge_root_state;
 195 
 196 #if INCLUDE_RTM_OPT
 197   // RTM state at compile time. Used during deoptimization to decide
 198   // whether to restart collecting RTM locking abort statistic again.
 199   RTMState _rtm_state;
 200 #endif
 201 
 202   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed


 420   bool  is_zombie() const                         { return _state == zombie; }
 421   bool  is_unloaded() const                       { return _state == unloaded;   }
 422 
 423 #if INCLUDE_RTM_OPT
 424   // rtm state accessing and manipulating
 425   RTMState  rtm_state() const                     { return _rtm_state; }
 426   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 427 #endif
 428 
 429   // Make the nmethod non entrant. The nmethod will continue to be
 430   // alive.  It is used when an uncommon trap happens.  Returns true
 431   // if this thread changed the state of the nmethod or false if
 432   // another thread performed the transition.
 433   bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
 434   bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 435 
 436   // used by jvmti to track if the unload event has been reported
 437   bool  unload_reported()                         { return _unload_reported; }
 438   void  set_unload_reported()                     { _unload_reported = true; }
 439 









 440   bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
 441   void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
 442 
 443   void  make_unloaded(BoolObjectClosure* is_alive, oop cause);
 444 
 445   bool has_dependencies()                         { return dependencies_size() != 0; }
 446   void flush_dependencies(BoolObjectClosure* is_alive);
 447   bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
        // One-shot transition: records that this nmethod's dependencies have
        // been flushed. The assert enforces false -> true exactly once; per the
        // field's declaration this is protected by CodeCache_lock.
 448   void set_has_flushed_dependencies()             {
 449     assert(!has_flushed_dependencies(), "should only happen once");
 450     _has_flushed_dependencies = 1;
 451   }
 452 
 453   bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
 454   void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }
 455 
 456   bool  has_unsafe_access() const                 { return _has_unsafe_access; }
 457   void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }
 458 
 459   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }


 535   address continuation_for_implicit_exception(address pc);
 536 
 537   // On-stack replacement support
 538   int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
 539   address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
 540   void  invalidate_osr_method();
 541   nmethod* osr_link() const                       { return _osr_link; }
 542   void     set_osr_link(nmethod *n)               { _osr_link = n; }
 543 
 544   // tells whether frames described by this nmethod can be deoptimized
 545   // note: native wrappers cannot be deoptimized.
 546   bool can_be_deoptimized() const { return is_java_method(); }
 547 
 548   // Inline cache support
 549   void clear_inline_caches();
 550   void cleanup_inline_caches();
        // True iff addr falls in [code_begin(), verified_entry_point()) — the
        // prologue region before the verified entry where the inline-cache
        // class check is performed.
 551   bool inlinecache_check_contains(address addr) const {
 552     return (addr >= code_begin() && addr < verified_entry_point());
 553   }
 554 




 555   // Check that all metadata is still alive
 556   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 557 
 558   // unlink and deallocate this nmethod
 559   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
 560   // expected to use any other private methods/data in this class.
 561 
 562  protected:
 563   void flush();
 564 
 565  public:
 566   // When true is returned, it is unsafe to remove this nmethod even if
 567   // it is a zombie, since the VM or the ServiceThread might still be
 568   // using it.
 569   bool is_locked_by_vm() const                    { return _lock_count >0; }
 570 
 571   // See comment at definition of _last_seen_on_stack
 572   void mark_as_seen_on_stack();
 573   bool can_not_entrant_be_converted();
 574 
 575   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
 576   void set_method(Method* method) { _method = method; }
 577 
 578   // GC support
 579   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);




 580   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
 581 
 582   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
 583                                      OopClosure* f);
 584   void oops_do(OopClosure* f) { oops_do(f, false); }
 585   void oops_do(OopClosure* f, bool allow_zombie);
 586   bool detect_scavenge_root_oops();
 587   void verify_scavenge_root_oops() PRODUCT_RETURN;
 588 
 589   bool test_set_oops_do_mark();
 590   static void oops_do_marking_prologue();
 591   static void oops_do_marking_epilogue();
 592   static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
 593   bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
 594 
 595   // ScopeDesc for an instruction
 596   ScopeDesc* scope_desc_at(address pc);
 597 
 598  private:
 599   ScopeDesc* scope_desc_in(address begin, address end);




  94 //  [Debugging information]
  95 //  - oop array
  96 //  - data array
  97 //  - pcs
  98 //  [Exception handler table]
  99 //  - handler entry point array
 100 //  [Implicit Null Pointer exception table]
 101 //  - implicit null table array
 102 
 103 class Dependencies;
 104 class ExceptionHandlerTable;
 105 class ImplicitExceptionTable;
 106 class AbstractCompiler;
 107 class xmlStream;
 108 
 109 class nmethod : public CodeBlob {
 110   friend class VMStructs;
 111   friend class NMethodSweeper;
 112   friend class CodeCache;  // scavengable oops
 113  private:
 114 
 115   // GC support to help figure out if an nmethod has been
 116   // cleaned/unloaded by the current GC.
 117   static unsigned char _global_unloading_clock;
 118 
 119   // Shared fields for all nmethod's
 120   Method*   _method;
 121   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
 122   jmethodID _jmethod_id;       // Cache of method()->jmethod_id()
 123 
 124   // To support simple linked-list chaining of nmethods:
 125   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
 126 
 127   union {
 128     // Used by G1 to chain nmethods.
 129     nmethod* _unloading_next;
 130     // Used by non-G1 GCs to chain nmethods.
 131     nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
 132   };
 133 
 134   static nmethod* volatile _oops_do_mark_nmethods;
 135   nmethod*        volatile _oops_do_mark_link;
 136 
 137   AbstractCompiler* _compiler; // The compiler which compiled this nmethod
 138 
 139   // offsets for entry points
 140   address _entry_point;                      // entry point with class check
 141   address _verified_entry_point;             // entry point without class check
 142   address _osr_entry_point;                  // entry point for on stack replacement
 143 
 144   // Offsets for different nmethod parts
 145   int _exception_offset;
 146   // All deoptee's will resume execution at this location described by
 147   // this offset.
 148   int _deoptimize_offset;
 149   // All deoptee's at a MethodHandle call site will resume execution
 150   // at this location described by this offset.
 151   int _deoptimize_mh_offset;
 152   // Offset of the unwind handler if it exists


 174   int _comp_level;                           // compilation level
 175 
 176   // protected by CodeCache_lock
 177   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
 178 
 179   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
 180   bool _marked_for_deoptimization;           // Used for stack deoptimization
 181 
 182   // used by jvmti to track if an unload event has been posted for this nmethod.
 183   bool _unload_reported;
 184 
 185   // set during construction
 186   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 187   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
 188   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 189   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 190 
 191   // Protected by Patching_lock
 192   volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 193 
 194   volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
 195 
 196 #ifdef ASSERT
 197   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 198 #endif
 199 
 200   enum { in_use       = 0,   // executable nmethod
 201          not_entrant  = 1,   // marked for deoptimization but activations may still exist,
 202                              // will be transformed to zombie when all activations are gone
 203          zombie       = 2,   // no activations exist, nmethod is ready for purge
 204          unloaded     = 3 }; // there should be no activations, should not be called,
 205                              // will be transformed to zombie immediately
 206 
 207   jbyte _scavenge_root_state;
 208 
 209 #if INCLUDE_RTM_OPT
 210   // RTM state at compile time. Used during deoptimization to decide
 211   // whether to restart collecting RTM locking abort statistic again.
 212   RTMState _rtm_state;
 213 #endif
 214 
 215   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed


 433   bool  is_zombie() const                         { return _state == zombie; }
 434   bool  is_unloaded() const                       { return _state == unloaded;   }
 435 
 436 #if INCLUDE_RTM_OPT
 437   // rtm state accessing and manipulating
 438   RTMState  rtm_state() const                     { return _rtm_state; }
 439   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 440 #endif
 441 
 442   // Make the nmethod non entrant. The nmethod will continue to be
 443   // alive.  It is used when an uncommon trap happens.  Returns true
 444   // if this thread changed the state of the nmethod or false if
 445   // another thread performed the transition.
 446   bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
 447   bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 448 
 449   // used by jvmti to track if the unload event has been reported
 450   bool  unload_reported()                         { return _unload_reported; }
 451   void  set_unload_reported()                     { _unload_reported = true; }
 452 
        // Accessors for G1's unloading work-list link. NOTE(review):
        // _unloading_next shares a union with _scavenge_root_link (see the
        // union declaration earlier in this revision), so these are only
        // meaningful while G1 is chaining nmethods for unloading — the two
        // links must never be live at the same time.
 453   void set_unloading_next(nmethod* next)          { _unloading_next = next; }
 454   nmethod* unloading_next()                       { return _unloading_next; }
 455 
 456   static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
 457   static void increase_unloading_clock();
 458 
 459   void set_unloading_clock(unsigned char unloading_clock);
 460   unsigned char unloading_clock();
 461 
 462   bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
 463   void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
 464 
 465   void  make_unloaded(BoolObjectClosure* is_alive, oop cause);
 466 
 467   bool has_dependencies()                         { return dependencies_size() != 0; }
 468   void flush_dependencies(BoolObjectClosure* is_alive);
 469   bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
        // One-shot transition: records that this nmethod's dependencies have
        // been flushed. The assert enforces false -> true exactly once; per the
        // field's declaration this is protected by CodeCache_lock.
 470   void set_has_flushed_dependencies()             {
 471     assert(!has_flushed_dependencies(), "should only happen once");
 472     _has_flushed_dependencies = 1;
 473   }
 474 
 475   bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
 476   void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }
 477 
 478   bool  has_unsafe_access() const                 { return _has_unsafe_access; }
 479   void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }
 480 
 481   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }


 557   address continuation_for_implicit_exception(address pc);
 558 
 559   // On-stack replacement support
        // Returns the bytecode index this OSR nmethod replaces execution at.
        // Asserts this is an OSR method: for normal methods _entry_bci is
        // InvocationEntryBci (see the _entry_bci field comment) and must not
        // be read through this accessor.
 560   int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
        // Returns the on-stack-replacement entry point; only valid for OSR
        // nmethods, enforced by the assert.
 561   address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
 562   void  invalidate_osr_method();
 563   nmethod* osr_link() const                       { return _osr_link; }
 564   void     set_osr_link(nmethod *n)               { _osr_link = n; }
 565 
 566   // tells whether frames described by this nmethod can be deoptimized
 567   // note: native wrappers cannot be deoptimized.
 568   bool can_be_deoptimized() const { return is_java_method(); }
 569 
 570   // Inline cache support
 571   void clear_inline_caches();
 572   void cleanup_inline_caches();
        // True iff addr falls in [code_begin(), verified_entry_point()) — the
        // prologue region before the verified entry where the inline-cache
        // class check is performed.
 573   bool inlinecache_check_contains(address addr) const {
 574     return (addr >= code_begin() && addr < verified_entry_point());
 575   }
 576 
 577   // Verify calls to dead methods have been cleaned.
 578   void verify_clean_inline_caches();
 579   // Verify and count cached icholder relocations.
 580   int  verify_icholder_relocations();
 581   // Check that all metadata is still alive
 582   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 583 
 584   // unlink and deallocate this nmethod
 585   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
 586   // expected to use any other private methods/data in this class.
 587 
 588  protected:
 589   void flush();
 590 
 591  public:
 592   // When true is returned, it is unsafe to remove this nmethod even if
 593   // it is a zombie, since the VM or the ServiceThread might still be
 594   // using it.
 595   bool is_locked_by_vm() const                    { return _lock_count >0; }
 596 
 597   // See comment at definition of _last_seen_on_stack
 598   void mark_as_seen_on_stack();
 599   bool can_not_entrant_be_converted();
 600 
 601   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
 602   void set_method(Method* method) { _method = method; }
 603 
 604   // GC support
 605   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
 606   //  The parallel versions are used by G1.
 607   bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
 608   void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
 609   //  Unload a nmethod if the *root object is dead.
 610   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
 611 
 612   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
 613                                      OopClosure* f);
 614   void oops_do(OopClosure* f) { oops_do(f, false); }
 615   void oops_do(OopClosure* f, bool allow_zombie);
 616   bool detect_scavenge_root_oops();
 617   void verify_scavenge_root_oops() PRODUCT_RETURN;
 618 
 619   bool test_set_oops_do_mark();
 620   static void oops_do_marking_prologue();
 621   static void oops_do_marking_epilogue();
 622   static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
 623   bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
 624 
 625   // ScopeDesc for an instruction
 626   ScopeDesc* scope_desc_at(address pc);
 627 
 628  private:
 629   ScopeDesc* scope_desc_in(address begin, address end);