src/share/vm/code/nmethod.hpp

Old version (before the change):
 285 
 286   int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
 287   int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
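
The +1 in both counts lines up with HotSpot's 1-biased oop and metadata indexes in relocation info, where index 0 is reserved. A minimal sketch of mapping such a biased index to its slot, assuming the HotSpot source tree is on the include path (biased_oop_slot is a hypothetical helper, not part of the class):

    #include "code/nmethod.hpp"

    // Hypothetical helper: map a 1-biased oop index (0 is reserved by the
    // relocation format) to the corresponding slot in the oops section.
    static oop* biased_oop_slot(nmethod* nm, int index) {
      assert(index > 0 && index < nm->oops_count(), "1-biased index out of range");
      return nm->oops_begin() + (index - 1);
    }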
 288 
 289   int total_size        () const;
 290 
 291   void dec_hotness_counter()        { _hotness_counter--; }
 292   void set_hotness_counter(int val) { _hotness_counter = val; }
 293   int  hotness_counter() const      { return _hotness_counter; }
 294 
 295   // Containment
 296   bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
 297   bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
 298   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
 299   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
 300 
 301   // entry points
 302   address entry_point() const                     { return _entry_point;             } // normal entry point
 303   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
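
The difference between the two entries is only whether the receiver check has already happened: the normal entry performs the klass check first, the verified entry assumes it already succeeded. A minimal dispatch sketch under that assumption (entry_for_call is illustrative, not HotSpot code):

    #include "code/nmethod.hpp"

    // Hypothetical helper: choose the entry based on how much the caller
    // already knows about the receiver. verified_entry_point() skips the
    // receiver-klass check that entry_point() performs first.
    static address entry_for_call(nmethod* nm, bool receiver_klass_checked) {
      return receiver_klass_checked ? nm->verified_entry_point()
                                    : nm->entry_point();
    }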
 304 
 305   enum { in_use       = 0,   // executable nmethod
 306          not_entrant  = 1,   // marked for deoptimization but activations may still exist,
 307                              // will be transformed to zombie when all activations are gone
 308          zombie       = 2,   // no activations exist, nmethod is ready for purge
 309          unloaded     = 3 }; // there should be no activations, should not be called,
 310                              // will be transformed to zombie immediately
 311 
 312   // flag accessing and manipulation
 313   bool  is_in_use() const                         { return _state == in_use; }
 314   bool  is_alive() const                          { unsigned char s = _state; return s < zombie; }
 315   bool  is_not_entrant() const                    { return _state == not_entrant; }
 316   bool  is_zombie() const                         { return _state == zombie; }
 317   bool  is_unloaded() const                       { return _state == unloaded; }
 318 
 319 #if INCLUDE_RTM_OPT
 320   // rtm state accessing and manipulating
 321   RTMState  rtm_state() const                     { return _rtm_state; }
 322   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 323 #endif
 324 
 325   // Make the nmethod non entrant. The nmethod will continue to be
 326   // alive.  It is used when an uncommon trap happens.  Returns true
 327   // if this thread changed the state of the nmethod or false if
 328   // another thread performed the transition.
 329   bool  make_not_entrant() {
 330     assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
 331     return make_not_entrant_or_zombie(not_entrant);
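
The comment above fixes the contract: at most one thread gets true back for a given transition. A hedged sketch of a caller honoring that (maybe_invalidate and the needs_deopt flag are illustrative, not part of HotSpot):

    #include "code/nmethod.hpp"

    // Illustrative caller: only react when this thread actually performed
    // the in_use -> not_entrant transition.
    static void maybe_invalidate(nmethod* nm, bool needs_deopt) {
      if (nm->is_in_use() && needs_deopt) {
        if (nm->make_not_entrant()) {
          // we changed the state; the nmethod stays alive, since
          // not_entrant (1) still sorts below zombie (2) in the enum above
        } else {
          // another thread won the race; nothing further to do here
        }
      }
    }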


 566   // corresponds to the given method as well.
 567   virtual bool is_dependent_on_method(Method* dependee);
 568 
 569   // is it ok to patch at address?
 570   bool is_patchable_at(address instr_address);
 571 
 572   // UseBiasedLocking support
 573   ByteSize native_receiver_sp_offset() {
 574     return _native_receiver_sp_offset;
 575   }
 576   ByteSize native_basic_lock_sp_offset() {
 577     return _native_basic_lock_sp_offset;
 578   }
 579 
 580   // support for code generation
 581   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 582   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 583   static int state_offset()                       { return offset_of(nmethod, _state); }
 584 
 585   virtual void metadata_do(void f(Metadata*));

 586 };
 587 
 588 // Locks an nmethod so its code will not get removed and it will not
 589 // be made into a zombie, even if it is a not_entrant method. After the
 590 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 591 // needs to be done, then lock_nmethod() is used directly to keep the
 592 // generated code from being reused too early.
 593 class nmethodLocker : public StackObj {
 594   CompiledMethod* _nm;
 595 
 596  public:
 597 
 598   // note: nm can be NULL
 599   // Only JvmtiDeferredEvent::compiled_method_unload_event()
 600   // should pass zombie_ok == true.
 601   static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
 602   static void unlock_nmethod(CompiledMethod* nm); // (ditto)
 603 
 604   nmethodLocker(address pc); // derive nm from pc
 605   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
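
The class comment above gives the intent; here is a hedged usage sketch. The destructor is not shown in this excerpt, so the sketch assumes the usual RAII pairing, i.e. that it calls unlock_nmethod() on the way out:

    #include "code/nmethod.hpp"

    // Usage sketch: pin an nmethod while walking its oops so the code blob
    // cannot be flushed or turned into a zombie underneath us. Assumes the
    // destructor (not shown above) unlocks via unlock_nmethod().
    static void with_nmethod_pinned(nmethod* nm) {
      nmethodLocker nl(nm);
      for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
        // inspect *p here; the nmethod stays locked for this whole scope
      }
    }

    // When only a code address is available, the pc-based constructor
    // derives the owning nmethod first:  nmethodLocker nl(pc);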

New version (after the change):
 285 
 286   int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
 287   int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
 288 
 289   int total_size        () const;
 290 
 291   void dec_hotness_counter()        { _hotness_counter--; }
 292   void set_hotness_counter(int val) { _hotness_counter = val; }
 293   int  hotness_counter() const      { return _hotness_counter; }
 294 
 295   // Containment
 296   bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
 297   bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
 298   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
 299   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
 300 
 301   // entry points
 302   address entry_point() const                     { return _entry_point;             } // normal entry point
 303   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 304 

 305   // flag accessing and manipulation
 306   bool  is_in_use() const                         { return _state == in_use; }
 307   bool  is_alive() const                          { unsigned char s = _state; return s < zombie; }
 308   bool  is_not_entrant() const                    { return _state == not_entrant; }
 309   bool  is_zombie() const                         { return _state == zombie; }
 310   bool  is_unloaded() const                       { return _state == unloaded; }
 311 
 312 #if INCLUDE_RTM_OPT
 313   // rtm state accessing and manipulating
 314   RTMState  rtm_state() const                     { return _rtm_state; }
 315   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 316 #endif
 317 
 318   // Make the nmethod non entrant. The nmethod will continue to be
 319   // alive.  It is used when an uncommon trap happens.  Returns true
 320   // if this thread changed the state of the nmethod or false if
 321   // another thread performed the transition.
 322   bool  make_not_entrant() {
 323     assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
 324     return make_not_entrant_or_zombie(not_entrant);


 559   // corresponds to the given method as well.
 560   virtual bool is_dependent_on_method(Method* dependee);
 561 
 562   // is it ok to patch at address?
 563   bool is_patchable_at(address instr_address);
 564 
 565   // UseBiasedLocking support
 566   ByteSize native_receiver_sp_offset() {
 567     return _native_receiver_sp_offset;
 568   }
 569   ByteSize native_basic_lock_sp_offset() {
 570     return _native_basic_lock_sp_offset;
 571   }
 572 
 573   // support for code generation
 574   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 575   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 576   static int state_offset()                       { return offset_of(nmethod, _state); }
 577 
 578   virtual void metadata_do(void f(Metadata*));
 579 
 580   NativeCallWrapper* call_wrapper_at(address call) const;
 581   NativeCallWrapper* call_wrapper_before(address return_pc) const;
 582   address call_instruction_address(address pc) const;
 583 
 584   virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
 585   virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
 586   virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
 587 };
 588 
 589 // Locks an nmethod so its code will not get removed and it will not
 590 // be made into a zombie, even if it is a not_entrant method. After the
 591 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 592 // needs to be done, then lock_nmethod() is used directly to keep the
 593 // generated code from being reused too early.
 594 class nmethodLocker : public StackObj {
 595   CompiledMethod* _nm;
 596 
 597  public:
 598 
 599   // note: nm can be NULL
 600   // Only JvmtiDeferredEvent::compiled_method_unload_event()
 601   // should pass zombie_ok == true.
 602   static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
 603   static void unlock_nmethod(CompiledMethod* nm); // (ditto)
 604 
 605   nmethodLocker(address pc); // derive nm from pc
 606   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }

