src/share/vm/code/nmethod.hpp

 177   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 178   unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
 179 
 180   // Protected by Patching_lock
 181   unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
 182 
 183 #ifdef ASSERT
 184   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 185 #endif
 186 
 187   enum { alive        = 0,
 188          not_entrant  = 1, // uncommon trap has happened but activations may still exist
 189          zombie       = 2,
 190          unloaded     = 3 };
 191 
 192 
 193   jbyte _scavenge_root_state;
 194 
 195   NOT_PRODUCT(bool _has_debug_info; )
 196 
 197   // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
 198   jint  _lock_count;
 199 
 200   // not_entrant method removal. Each mark_sweep pass will update
 201   // this mark to current sweep invocation count if it is seen on the
 202   // stack.  A not_entrant method can be removed when there are no
 203   // more activations, i.e., when the _stack_traversal_mark is less than
 204   // current sweep traversal index.
 205   long _stack_traversal_mark;
 206 
 207   ExceptionCache *_exception_cache;
 208   PcDescCache     _pc_desc_cache;
 209 
 210   // These are used for compiled synchronized native methods to
 211   // locate the owner and stack slot for the BasicLock so that we can
 212   // properly revoke the bias of the owner if necessary. They are
 213   // needed because there is no debug information for compiled native
 214   // wrappers and the oop maps are insufficient to allow
 215   // frame::retrieve_receiver() to work. Currently they are expected
 216   // to be byte offsets from the Java stack pointer for maximum code
 217   // sharing between platforms. Note that currently biased locking


 505 
 506   // tells whether frames described by this nmethod can be deoptimized
 507   // note: native wrappers cannot be deoptimized.
 508   bool can_be_deoptimized() const { return is_java_method(); }
 509 
 510   // Inline cache support
 511   void clear_inline_caches();
 512   void cleanup_inline_caches();
 513   bool inlinecache_check_contains(address addr) const {
 514     return (addr >= code_begin() && addr < verified_entry_point());
 515   }
 516 
 517   // unlink and deallocate this nmethod
 518   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
 519   // expected to use any other private methods/data in this class.
 520 
 521  protected:
 522   void flush();
 523 
 524  public:
 525   // If returning true, it is unsafe to remove this nmethod even though it is a zombie
 526   // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
 527   bool is_locked_by_vm() const                    { return _lock_count >0; }
 528 
 529   // See comment at definition of _last_seen_on_stack
 530   void mark_as_seen_on_stack();
 531   bool can_not_entrant_be_converted();
 532 
 533   // Evolution support. We make old (discarded) compiled methods point to new methodOops.
 534   void set_method(methodOop method) { _method = method; }
 535 
 536   // GC support
 537   void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
 538                     bool unloading_occurred);
 539   bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
 540                   oop* root, bool unloading_occurred);
 541 
 542   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
 543                                      OopClosure* f);
 544   void oops_do(OopClosure* f) { oops_do(f, false); }
 545   void oops_do(OopClosure* f, bool do_strong_roots_only);
 546   bool detect_scavenge_root_oops();


 672   bool is_dependent_on_method(methodOop dependee);
 673 
 674   // is it ok to patch at address?
 675   bool is_patchable_at(address instr_address);
 676 
 677   // UseBiasedLocking support
 678   ByteSize native_receiver_sp_offset() {
 679     return _native_receiver_sp_offset;
 680   }
 681   ByteSize native_basic_lock_sp_offset() {
 682     return _native_basic_lock_sp_offset;
 683   }
 684 
 685   // support for code generation
 686   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 687   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 688   static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }
 689 
 690 };
 691 
 692 // Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
 693 class nmethodLocker : public StackObj {
 694   nmethod* _nm;
 695 
 696  public:
 697 
 698   static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
 699   static void unlock_nmethod(nmethod* nm); // (ditto)
 700 
 701   nmethodLocker(address pc); // derive nm from pc
 702   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
 703   nmethodLocker() { _nm = NULL; }
 704   ~nmethodLocker() { unlock_nmethod(_nm); }
 705 
 706   nmethod* code() { return _nm; }
 707   void set_code(nmethod* new_nm) {
 708     unlock_nmethod(_nm);   // note:  This works even if _nm==new_nm.
 709     _nm = new_nm;
 710     lock_nmethod(_nm);
 711   }
 712 };
 713 
 714 #endif // SHARE_VM_CODE_NMETHOD_HPP


 177   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 178   unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
 179 
 180   // Protected by Patching_lock
 181   unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
 182 
 183 #ifdef ASSERT
 184   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 185 #endif
 186 
 187   enum { alive        = 0,
 188          not_entrant  = 1, // uncommon trap has happened but activations may still exist
 189          zombie       = 2,
 190          unloaded     = 3 };
 191 
 192 
 193   jbyte _scavenge_root_state;
 194 
 195   NOT_PRODUCT(bool _has_debug_info; )
 196 
 197   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 198   // and is not made into a zombie. However, once the nmethod is made into
 199   // a zombie, it will be locked one final time if CompiledMethodUnload
 200   // event processing needs to be done.
 201   jint  _lock_count;
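
As a minimal sketch of the counting scheme described above (the actual implementation lives in nmethod.cpp; the friend access to _lock_count and the exact guarantees here are assumptions), lock_nmethod()/unlock_nmethod() could look roughly like this:

void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
  if (nm == NULL)  return;                 // locking NULL is a no-op
  Atomic::inc(&nm->_lock_count);           // any non-zero count blocks flushing
  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
}

void nmethodLocker::unlock_nmethod(nmethod* nm) {
  if (nm == NULL)  return;
  Atomic::dec(&nm->_lock_count);
  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
}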
 202 
 203   // not_entrant method removal. Each mark_sweep pass will update
 204   // this mark to current sweep invocation count if it is seen on the
 205   // stack.  A not_entrant method can be removed when there are no
 206   // more activations, i.e., when the _stack_traversal_mark is less than
 207   // current sweep traversal index.
 208   long _stack_traversal_mark;
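
To make the reclamation rule above concrete, here is an illustrative predicate a sweeper might evaluate (not the actual NMethodSweeper code; it only uses public nmethod accessors):

static bool sweeper_can_reclaim(nmethod* nm, long current_traversal) {
  // Reclaimable once no activation was seen in the latest pass, i.e. the
  // nmethod's mark is older than the sweeper's current traversal count.
  return nm->is_not_entrant() &&
         nm->stack_traversal_mark() < current_traversal &&
         !nm->is_locked_by_vm();
}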
 209 
 210   ExceptionCache *_exception_cache;
 211   PcDescCache     _pc_desc_cache;
 212 
 213   // These are used for compiled synchronized native methods to
 214   // locate the owner and stack slot for the BasicLock so that we can
 215   // properly revoke the bias of the owner if necessary. They are
 216   // needed because there is no debug information for compiled native
 217   // wrappers and the oop maps are insufficient to allow
 218   // frame::retrieve_receiver() to work. Currently they are expected
 219   // to be byte offsets from the Java stack pointer for maximum code
 220   // sharing between platforms. Note that currently biased locking


 508 
 509   // tells whether frames described by this nmethod can be deoptimized
 510   // note: native wrappers cannot be deoptimized.
 511   bool can_be_deoptimized() const { return is_java_method(); }
 512 
 513   // Inline cache support
 514   void clear_inline_caches();
 515   void cleanup_inline_caches();
 516   bool inlinecache_check_contains(address addr) const {
 517     return (addr >= code_begin() && addr < verified_entry_point());
 518   }
 519 
 520   // unlink and deallocate this nmethod
 521   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
 522   // expected to use any other private methods/data in this class.
 523 
 524  protected:
 525   void flush();
 526 
 527  public:
 528   // When true is returned, it is unsafe to remove this nmethod even if
 529   // it is a zombie, since the VM or the ServiceThread might still be
 530   // using it. Should only be called from a safepoint.
 531   bool is_locked_by_vm() const                    { return _lock_count >0; }
 532 
 533   // See comment at definition of _last_seen_on_stack
 534   void mark_as_seen_on_stack();
 535   bool can_not_entrant_be_converted();
 536 
 537   // Evolution support. We make old (discarded) compiled methods point to new methodOops.
 538   void set_method(methodOop method) { _method = method; }
 539 
 540   // GC support
 541   void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
 542                     bool unloading_occurred);
 543   bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
 544                   oop* root, bool unloading_occurred);
 545 
 546   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
 547                                      OopClosure* f);
 548   void oops_do(OopClosure* f) { oops_do(f, false); }
 549   void oops_do(OopClosure* f, bool do_strong_roots_only);
 550   bool detect_scavenge_root_oops();
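
For orientation, a hypothetical closure that could be handed to oops_do(); it assumes only the standard OopClosure interface (do_oop overloads for oop* and narrowOop*):

class CountNmethodOopsClosure : public OopClosure {
  int _count;
 public:
  CountNmethodOopsClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { _count++; }  // full-width oop slot
  virtual void do_oop(narrowOop* p) { _count++; }  // compressed oop slot
  int count() const { return _count; }
};

// Illustrative usage:
//   CountNmethodOopsClosure cl;
//   nm->oops_do(&cl);     // equivalent to nm->oops_do(&cl, false)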


 676   bool is_dependent_on_method(methodOop dependee);
 677 
 678   // is it ok to patch at address?
 679   bool is_patchable_at(address instr_address);
 680 
 681   // UseBiasedLocking support
 682   ByteSize native_receiver_sp_offset() {
 683     return _native_receiver_sp_offset;
 684   }
 685   ByteSize native_basic_lock_sp_offset() {
 686     return _native_basic_lock_sp_offset;
 687   }
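
As an illustration of how these byte offsets might be consumed (the real consumer is frame code elsewhere in the VM; the helper names and arithmetic below are assumptions), given a wrapper frame's Java stack pointer:

static oop* wrapper_receiver_addr(nmethod* nm, intptr_t* java_sp) {
  // slot holding the lock owner (receiver), located by byte offset from sp
  return (oop*)((address)java_sp + in_bytes(nm->native_receiver_sp_offset()));
}

static BasicLock* wrapper_basic_lock_addr(nmethod* nm, intptr_t* java_sp) {
  // slot holding the BasicLock used by the synchronized native wrapper
  return (BasicLock*)((address)java_sp + in_bytes(nm->native_basic_lock_sp_offset()));
}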
 688 
 689   // support for code generation
 690   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
 691   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
 692   static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }
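
A hypothetical illustration of why these raw offsets exist: generated code (or other low-level code) can reach an nmethod field by offset instead of calling the C++ accessor. Assuming _verified_entry_point is stored as an address-typed field, the load below yields the same value as nm->verified_entry_point():

static address load_verified_entry_point_raw(nmethod* nm) {
  // what an emitted load of [nm + verified_entry_point_offset()] produces
  return *(address*)((address)nm + nmethod::verified_entry_point_offset());
}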
 693 
 694 };
 695 
 696 // Locks an nmethod so its code will not get removed and it will not
 697 // be made into a zombie, even if it is a not_entrant method. After the
 698 // nmethod becomes a zombie, if CompiledMethodUnload event processing
 699 // needs to be done, then lock_nmethod() is used directly to keep the
 700 // generated code from being reused too early.
 701 class nmethodLocker : public StackObj {
 702   nmethod* _nm;
 703 
 704  public:
 705 
 706   // note: nm can be NULL
 707   // Only JvmtiDeferredEvent::compiled_method_unload_event()
 708   // should pass zombie_ok == true.
 709   static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
 710   static void unlock_nmethod(nmethod* nm); // (ditto)
 711 
 712   nmethodLocker(address pc); // derive nm from pc
 713   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
 714   nmethodLocker() { _nm = NULL; }
 715   ~nmethodLocker() { unlock_nmethod(_nm); }
 716 
 717   nmethod* code() { return _nm; }
 718   void set_code(nmethod* new_nm) {
 719     unlock_nmethod(_nm);   // note:  This works even if _nm==new_nm.
 720     _nm = new_nm;
 721     lock_nmethod(_nm);
 722   }
 723 };
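
A hypothetical usage sketch of the class above: a stack-allocated nmethodLocker keeps the nmethod from being flushed for the duration of a scope.

void inspect_code_at(address pc) {   // illustrative helper, not part of this header
  nmethodLocker nml(pc);             // find the enclosing nmethod and lock it
  nmethod* nm = nml.code();
  if (nm != NULL) {
    // safe to examine nm here; it cannot be flushed while nml is live
  }
}                                    // nml's destructor unlocks the nmethod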
 724 
 725 #endif // SHARE_VM_CODE_NMETHOD_HPP