src/share/vm/code/nmethod.hpp
Sdiff for 8048721 (old version first, new version second)

Old version:

 185   bool _marked_for_deoptimization;           // Used for stack deoptimization
 186 
 187   // used by jvmti to track if an unload event has been posted for this nmethod.
 188   bool _unload_reported;
 189 
 190   // set during construction
 191   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 192   unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
 193   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 194   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 195 
 196   // Protected by Patching_lock
 197   volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}
 198 
 199   volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
 200 
 201 #ifdef ASSERT
 202   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 203 #endif
 204 
 205   enum { in_use       = 0,   // executable nmethod
 206          not_entrant  = 1,   // marked for deoptimization but activations may still exist,
 207                              // will be transformed to zombie when all activations are gone
 208          zombie       = 2,   // no activations exist, nmethod is ready for purge
 209          unloaded     = 3 }; // there should be no activations, should not be called,
 210                              // will be transformed to zombie immediately
 211 
 212   jbyte _scavenge_root_state;
 213 
 214 #if INCLUDE_RTM_OPT
 215   // RTM state at compile time. Used during deoptimization to decide
 216   // whether to restart collecting RTM locking abort statistics.
 217   RTMState _rtm_state;
 218 #endif
 219 
 220   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 221   // and is not made into a zombie. However, once the nmethod is made into
 222   // a zombie, it will be locked one final time if CompiledMethodUnload
 223   // event processing needs to be done.
 224   volatile jint _lock_count;
 225 
 226   // not_entrant method removal. Each mark_sweep pass will update
 227   // this mark to current sweep invocation count if it is seen on the
 228   // stack.  A not_entrant method can be removed when there are no
 229   // more activations, i.e., when the _stack_traversal_mark is less than
 230   // current sweep traversal index.
 231   long _stack_traversal_mark;


 414 
 415   void dec_hotness_counter()        { _hotness_counter--; }
 416   void set_hotness_counter(int val) { _hotness_counter = val; }
 417   int  hotness_counter() const      { return _hotness_counter; }
 418 
 419   // Containment
 420   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
 421   bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
 422   bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
 423   bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
 424   bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
 425   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
 426   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
 427   bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
 428   bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
 429 
 430   // entry points
 431   address entry_point() const                     { return _entry_point;             } // normal entry point
 432   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 433 







 434   // flag accessing and manipulation
 435   bool  is_in_use() const                         { return _state == in_use; }
 436   bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
 437   bool  is_not_entrant() const                    { return _state == not_entrant; }
 438   bool  is_zombie() const                         { return _state == zombie; }
 439   bool  is_unloaded() const                       { return _state == unloaded;   }
 440 
 441 #if INCLUDE_RTM_OPT
 442   // rtm state accessing and manipulating
 443   RTMState  rtm_state() const                     { return _rtm_state; }
 444   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 445 #endif
 446 
 447   // Make the nmethod non-entrant. The nmethod will continue to be
 448   // alive.  It is used when an uncommon trap happens.  Returns true
 449   // if this thread changed the state of the nmethod or false if
 450   // another thread performed the transition.
 451   bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
 452   bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 453 


 739 
 740   // Fast breakpoint support. Tells if this compiled method is
 741   // dependent on the given method. Returns true if this nmethod
 742   // corresponds to the given method as well.
 743   bool is_dependent_on_method(Method* dependee);
 744 
 745   // is it ok to patch at address?
 746   bool is_patchable_at(address instr_address);
 747 
 748   // UseBiasedLocking support
 749   ByteSize native_receiver_sp_offset() {
 750     return _native_receiver_sp_offset;
 751   }
 752   ByteSize native_basic_lock_sp_offset() {
 753     return _native_basic_lock_sp_offset;
 754   }
 755 
 756   // support for code generation
 757   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 758   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 759   static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }
 760 
 761   // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
 762   // redefine classes doesn't purge it.
 763   static void mark_on_stack(nmethod* nm) {
 764     nm->metadata_do(Metadata::mark_on_stack);
 765   }
 766   void metadata_do(void f(Metadata*));
 767 };
 768 
 769 // Locks an nmethod so its code will not get removed and it will not
 770 // be made into a zombie, even if it is a not_entrant method. After the
 771 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 772 // needs to be done, then lock_nmethod() is used directly to keep the
 773 // generated code from being reused too early.
 774 class nmethodLocker : public StackObj {
 775   nmethod* _nm;
 776 
 777  public:
 778 
 779   // note: nm can be NULL


New version:

 185   bool _marked_for_deoptimization;           // Used for stack deoptimization
 186 
 187   // used by jvmti to track if an unload event has been posted for this nmethod.
 188   bool _unload_reported;
 189 
 190   // set during construction
 191   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 192   unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
 193   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 194   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 195 
 196   // Protected by Patching_lock
 197   volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}
 198 
 199   volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
 200 
 201 #ifdef ASSERT
 202   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 203 #endif
 204 







 205   jbyte _scavenge_root_state;
 206 
 207 #if INCLUDE_RTM_OPT
 208   // RTM state at compile time. Used during deoptimization to decide
 209   // whether to restart collecting RTM locking abort statistics.
 210   RTMState _rtm_state;
 211 #endif
 212 
 213   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 214   // and is not made into a zombie. However, once the nmethod is made into
 215   // a zombie, it will be locked one final time if CompiledMethodUnload
 216   // event processing needs to be done.
 217   volatile jint _lock_count;
 218 
 219   // not_entrant method removal. Each mark_sweep pass will update
 220   // this mark to current sweep invocation count if it is seen on the
 221   // stack.  A not_entrant method can be removed when there are no
 222   // more activations, i.e., when the _stack_traversal_mark is less than
 223   // current sweep traversal index.
 224   long _stack_traversal_mark;
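
A toy sketch of the sweep protocol this mark supports (the struct names and fields here are invented for illustration, not HotSpot's actual sweeper code): each pass stamps methods still seen on a stack with the current traversal index, and a not_entrant method whose mark is older than the current index has no remaining activations and may be reclaimed.

    // Toy model only; "ToySweeper" is hypothetical, not HotSpot's sweeper.
    #include <cstdio>

    struct ToyNMethod {
      bool not_entrant;
      long stack_traversal_mark;   // stamped when seen on a stack during a sweep
    };

    struct ToySweeper {
      long traversal_count;        // incremented once per sweep pass

      // Called for every compiled method found on some thread's stack.
      void mark_active(ToyNMethod* nm) { nm->stack_traversal_mark = traversal_count; }

      // A not_entrant method whose mark is older than the current pass was not
      // seen on any stack, so no activations remain and it can be reclaimed.
      bool can_reclaim(const ToyNMethod* nm) const {
        return nm->not_entrant && nm->stack_traversal_mark < traversal_count;
      }
    };

    int main() {
      ToySweeper sweeper{1};
      ToyNMethod nm{true, 0};
      sweeper.mark_active(&nm);                       // still on a stack
      std::printf("%d\n", sweeper.can_reclaim(&nm));  // 0: seen this pass
      sweeper.traversal_count = 2;                    // next pass, nm not seen
      std::printf("%d\n", sweeper.can_reclaim(&nm));  // 1: stale mark
      return 0;
    }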


 407 
 408   void dec_hotness_counter()        { _hotness_counter--; }
 409   void set_hotness_counter(int val) { _hotness_counter = val; }
 410   int  hotness_counter() const      { return _hotness_counter; }
 411 
 412   // Containment
 413   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
 414   bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
 415   bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
 416   bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
 417   bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
 418   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
 419   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
 420   bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
 421   bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
 422 
 423   // entry points
 424   address entry_point() const                     { return _entry_point;             } // normal entry point
 425   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 426 
 427   enum { in_use       = 0,   // executable nmethod
 428          not_entrant  = 1,   // marked for deoptimization but activations may still exist,
 429                              // will be transformed to zombie when all activations are gone
 430          zombie       = 2,   // no activations exist, nmethod is ready for purge
 431          unloaded     = 3 }; // there should be no activations, should not be called,
 432                              // will be transformed to zombie immediately
 433 
 434   // flag accessing and manipulation
 435   bool  is_in_use() const                         { return _state == in_use; }
 436   bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
 437   bool  is_not_entrant() const                    { return _state == not_entrant; }
 438   bool  is_zombie() const                         { return _state == zombie; }
 439   bool  is_unloaded() const                       { return _state == unloaded;   }
 440 
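
For illustration, a minimal self-contained model of this state machine: the state values and predicate logic mirror the header above, while the wrapper struct and main are invented scaffolding.

    #include <cassert>

    // State values and predicates taken from the header; the rest is scaffolding.
    enum { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

    struct StateModel {
      volatile unsigned char _state;

      bool is_in_use() const      { return _state == in_use; }
      // "alive" spans two states: still entrant, or only existing activations.
      bool is_alive() const       { return _state == in_use || _state == not_entrant; }
      bool is_not_entrant() const { return _state == not_entrant; }
      bool is_zombie() const      { return _state == zombie; }
      bool is_unloaded() const    { return _state == unloaded; }
    };

    int main() {
      StateModel m{in_use};
      assert(m.is_in_use() && m.is_alive());
      m._state = not_entrant;    // new calls blocked; activations may remain
      assert(!m.is_in_use() && m.is_alive());
      m._state = zombie;         // all activations gone; ready for purge
      assert(!m.is_alive() && m.is_zombie());
      return 0;
    }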
 441 #if INCLUDE_RTM_OPT
 442   // rtm state accessing and manipulating
 443   RTMState  rtm_state() const                     { return _rtm_state; }
 444   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 445 #endif
 446 
 447   // Make the nmethod non-entrant. The nmethod will continue to be
 448   // alive.  It is used when an uncommon trap happens.  Returns true
 449   // if this thread changed the state of the nmethod or false if
 450   // another thread performed the transition.
 451   bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
 452   bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 453 
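
The "returns true if this thread changed the state" contract is the usual pattern for racy one-way transitions. A hedged sketch of what the guarded transition might look like; the real nmethod::make_not_entrant_or_zombie in nmethod.cpp does much more (entry-point patching, deoptimization bookkeeping, etc.).

    #include <cassert>
    #include <mutex>

    // Hypothetical stand-in for the Patching_lock-guarded state transition.
    enum { in_use = 0, not_entrant = 1, zombie = 2 };

    struct Transitions {
      std::mutex patching_lock;        // models Patching_lock
      unsigned char state = in_use;

      // States only move forward, so a caller that finds the target state
      // already reached reports false: another thread won the race.
      bool make_not_entrant_or_zombie(unsigned char target) {
        std::lock_guard<std::mutex> guard(patching_lock);
        if (state >= target) return false;
        state = target;                // this thread performs the transition
        return true;
      }
      bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
      bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }
    };

    int main() {
      Transitions t;
      assert(t.make_not_entrant());    // first caller changes the state
      assert(!t.make_not_entrant());   // second caller sees it already done
      assert(t.make_zombie());         // forward transition still allowed
      return 0;
    }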


 739 
 740   // Fast breakpoint support. Tells if this compiled method is
 741   // dependent on the given method. Returns true if this nmethod
 742   // corresponds to the given method as well.
 743   bool is_dependent_on_method(Method* dependee);
 744 
 745   // is it ok to patch at address?
 746   bool is_patchable_at(address instr_address);
 747 
 748   // UseBiasedLocking support
 749   ByteSize native_receiver_sp_offset() {
 750     return _native_receiver_sp_offset;
 751   }
 752   ByteSize native_basic_lock_sp_offset() {
 753     return _native_basic_lock_sp_offset;
 754   }
 755 
 756   // support for code generation
 757   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 758   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 759   static int state_offset()                       { return offset_of(nmethod, _state); }
 760 
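
These offsets serve JIT code generation: emitted code loads a field at a fixed byte displacement from an nmethod pointer instead of calling an accessor. A standalone illustration using the standard offsetof macro; the struct below is a stand-in, not the real nmethod layout.

    #include <cstddef>
    #include <cstdio>

    // Invented layout; the field names merely echo nmethod.
    struct ToyNMethodLayout {
      void* verified_entry_point;
      volatile unsigned char state;
    };

    // What generated code effectively does with such an offset: load a field
    // at a fixed displacement from the nmethod base, no accessor call needed.
    static unsigned char load_state(const ToyNMethodLayout* nm) {
      const char* base = reinterpret_cast<const char*>(nm);
      return *reinterpret_cast<const volatile unsigned char*>(
          base + offsetof(ToyNMethodLayout, state));
    }

    int main() {
      ToyNMethodLayout nm{nullptr, 1};   // 1 == not_entrant in the enum above
      std::printf("state at offset %zu: %u\n",
                  offsetof(ToyNMethodLayout, state),
                  static_cast<unsigned>(load_state(&nm)));
      return 0;
    }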
 761   // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
 762   // redefine classes doesn't purge it.
 763   static void mark_on_stack(nmethod* nm) {
 764     nm->metadata_do(Metadata::mark_on_stack);
 765   }
 766   void metadata_do(void f(Metadata*));
 767 };
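
metadata_do is a plain function-pointer visitor: the nmethod walks every Metadata* it embeds and applies f, so mark_on_stack is just "visit everything with Metadata::mark_on_stack". A toy version of the same pattern, with invented stand-in types:

    #include <cstdio>
    #include <vector>

    // "Meta" models Metadata; the marker models Metadata::mark_on_stack.
    struct Meta { bool on_stack = false; };
    static void mark_meta_on_stack(Meta* m) { m->on_stack = true; }

    struct Holder {
      std::vector<Meta*> embedded;        // models the nmethod metadata section

      // Same shape as metadata_do above: apply f to every embedded Metadata*.
      void metadata_do(void f(Meta*)) {
        for (Meta* m : embedded) f(m);
      }
    };

    int main() {
      Meta a, b;
      Holder holder{{&a, &b}};
      holder.metadata_do(mark_meta_on_stack);          // marks all embedded Meta
      std::printf("%d %d\n", a.on_stack, b.on_stack);  // prints: 1 1
      return 0;
    }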
 768 
 769 // Locks an nmethod so its code will not get removed and it will not
 770 // be made into a zombie, even if it is a not_entrant method. After the
 771 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 772 // needs to be done, then lock_nmethod() is used directly to keep the
 773 // generated code from being reused too early.
 774 class nmethodLocker : public StackObj {
 775   nmethod* _nm;
 776 
 777  public:
 778 
 779   // note: nm can be NULL
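
nmethodLocker is a stack-allocated RAII guard over _lock_count: construction bumps the count so the sweeper will neither flush the method nor make it a zombie while the guard lives, and destruction releases it. A toy model of the protocol (simplified; the real class also handles the NULL and zombie cases):

    #include <cassert>

    // Toy model of the _lock_count protocol; not HotSpot's nmethodLocker.
    struct ToyNMethod {
      int lock_count = 0;
      bool sweeper_may_flush() const { return lock_count == 0; }
    };

    class ToyNMethodLocker {               // stack-allocated guard, like StackObj
      ToyNMethod* _nm;
     public:
      explicit ToyNMethodLocker(ToyNMethod* nm) : _nm(nm) {
        if (_nm != nullptr) _nm->lock_count++;   // nm may be NULL, as noted above
      }
      ~ToyNMethodLocker() {
        if (_nm != nullptr) _nm->lock_count--;
      }
    };

    int main() {
      ToyNMethod nm;
      {
        ToyNMethodLocker guard(&nm);  // pinned: not flushed, not made a zombie
        assert(!nm.sweeper_may_flush());
      }                               // guard destroyed, lock released
      assert(nm.sweeper_may_flush());
      return 0;
    }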