src/share/vm/code/nmethod.hpp (webrev for 8048721)
--- old/src/share/vm/code/nmethod.hpp
+++ new/src/share/vm/code/nmethod.hpp
@@ -739,41 +739,41 @@
 
   // Fast breakpoint support. Tells if this compiled method is
   // dependent on the given method. Returns true if this nmethod
   // corresponds to the given method as well.
   bool is_dependent_on_method(Method* dependee);
 
   // is it ok to patch at address?
   bool is_patchable_at(address instr_address);
 
   // UseBiasedLocking support
   ByteSize native_receiver_sp_offset() {
     return _native_receiver_sp_offset;
   }
   ByteSize native_basic_lock_sp_offset() {
     return _native_basic_lock_sp_offset;
   }
 
   // support for code generation
   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
-  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }
+  static int state_offset()                       { return offset_of(nmethod, _state); }
 
   // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
   // redefine classes doesn't purge it.
   static void mark_on_stack(nmethod* nm) {
     nm->metadata_do(Metadata::mark_on_stack);
   }
   void metadata_do(void f(Metadata*));
 };
 
 // Locks an nmethod so its code will not get removed and it will not
 // be made into a zombie, even if it is a not_entrant method. After the
 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 // needs to be done, then lock_nmethod() is used directly to keep the
 // generated code from being reused too early.
 class nmethodLocker : public StackObj {
   nmethod* _nm;
 
  public:
 
   // note: nm can be NULL
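Note on the changed line: entry_bci_offset() is dropped and state_offset() is added in its place. Since offset_of(nmethod, _state) yields the byte offset of the _state field within the nmethod object, JIT-generated code and runtime stubs can load and test an nmethod's state with a plain displacement instead of a C++ accessor call. A minimal sketch of the pattern, with a hypothetical layout standing in for the real nmethod (neither the type of _state nor its value constants appear in this hunk, so both are assumptions here):

  #include <cstddef>
  #include <cstdint>

  // Illustration only: a stand-in layout, not the real nmethod.
  struct FakeNMethod {
    int64_t header[4];
    volatile int8_t _state;          // the field state_offset() would locate
  };

  // Mirrors HotSpot's offset_of(), an offsetof()-style macro.
  static const int kStateOffset = offsetof(FakeNMethod, _state);

  // What emitted code effectively does: load the byte at nm + offset.
  static inline int8_t load_state(const FakeNMethod* nm) {
    return *reinterpret_cast<const volatile int8_t*>(
        reinterpret_cast<const char*>(nm) + kStateOffset);
  }

The point of exposing the offset as a static int is that the assembler only needs an integer displacement at code-emission time.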


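On the RedefineClasses hunk above: mark_on_stack(nm) uses metadata_do to visit every Metadata* embedded in the nmethod and marks each one as on_stack, which keeps class redefinition from purging it. A hedged sketch of a call site (CodeCache::alive_nmethods_do is an existing HotSpot iterator over live nmethods; using it as the driver is an assumption, since the actual caller is not part of this excerpt):

  // Assumed driver loop, not taken from this webrev:
  void mark_metadata_of_live_nmethods() {
    // Visit each live nmethod; nmethod::mark_on_stack then walks that
    // nmethod's embedded Metadata* values via metadata_do.
    CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
  }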
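On the nmethodLocker comment closing the hunk: the class derives from StackObj, so a lock is naturally scoped to the C++ frame that declares it. A minimal usage sketch, assuming an nmethodLocker(nmethod*) constructor that locks on construction and unlocks in the destructor (the constructors follow just below the shown excerpt; note the comment that nm may be NULL):

  // Sketch only: pin an nmethod while examining its code.
  void inspect(nmethod* nm) {
    nmethodLocker nml(nm);   // nm cannot be made a zombie or flushed here
    // ... safely walk nm's generated code and metadata ...
  }                          // destructor unlocks; nm is reclaimable again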