src/hotspot/share/code/nmethod.hpp

rev 54936 : [mq]: 8221734-v3


 102   int _speculations_offset;
 103   int _jvmci_data_offset;
 104 #endif
 105   int _nmethod_end_offset;
 106 
 107   int code_offset() const { return (address) code_begin() - header_begin(); }
 108 
 109   // location in frame (offset from sp) where deopt can store the original
 110   // pc during a deopt.
 111   int _orig_pc_offset;
 112 
 113   int _compile_id;                           // which compilation made this nmethod
 114   int _comp_level;                           // compilation level
 115 
 116   // protected by CodeCache_lock
 117   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
 118 
 119   // used by jvmti to track if an unload event has been posted for this nmethod.
 120   bool _unload_reported;
 121 
 122   // Protected by Patching_lock
 123   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
 124 
 125 #ifdef ASSERT
 126   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 127 #endif
 128 
 129 #if INCLUDE_RTM_OPT
 130   // RTM state at compile time. Used during deoptimization to decide
 131   // whether to restart collecting RTM locking abort statistics.
 132   RTMState _rtm_state;
 133 #endif
 134 
 135   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 136   // and is not made into a zombie. However, once the nmethod is made into
 137   // a zombie, it will be locked one final time if CompiledMethodUnload
 138   // event processing needs to be done.
 139   volatile jint _lock_count;
 140 
 141   // not_entrant method removal. Each mark_sweep pass will update
 142   // this mark to current sweep invocation count if it is seen on the


 369   // used by jvmti to track if the unload event has been reported
 370   bool  unload_reported()                         { return _unload_reported; }
 371   void  set_unload_reported()                     { _unload_reported = true; }
 372 
 373   int get_state() const {
 374     return _state;
 375   }
 376 
 377   void  make_unloaded();
 378 
 379   bool has_dependencies()                         { return dependencies_size() != 0; }
 380   void flush_dependencies(bool delete_immediately);
 381   bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
 382   void set_has_flushed_dependencies()             {
 383     assert(!has_flushed_dependencies(), "should only happen once");
 384     _has_flushed_dependencies = 1;
 385   }
 386 
 387   int   comp_level() const                        { return _comp_level; }
 388 
 389   void unlink_from_method(bool acquire_lock);
 390 
 391   // Support for oops in scopes and relocs:
 392   // Note: index 0 is reserved for null.
 393   oop   oop_at(int index) const;
 394   oop*  oop_addr_at(int index) const {  // for GC
 395     // relocation indexes are biased by 1 (because 0 is reserved)
 396     assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
 397     assert(!_oops_are_stale, "oops are stale");
 398     return &oops_begin()[index - 1];
 399   }
 400 
 401   // Support for meta data in scopes and relocs:
 402   // Note: index 0 is reserved for null.
 403   Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
 404   Metadata**  metadata_addr_at(int index) const {  // for GC
 405     // relocation indexes are biased by 1 (because 0 is reserved)
 406     assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
 407     return &metadata_begin()[index - 1];
 408   }
 409 




 102   int _speculations_offset;
 103   int _jvmci_data_offset;
 104 #endif
 105   int _nmethod_end_offset;
 106 
 107   int code_offset() const { return (address) code_begin() - header_begin(); }
 108 
 109   // location in frame (offset from sp) where deopt can store the original
 110   // pc during a deopt.
 111   int _orig_pc_offset;
 112 
 113   int _compile_id;                           // which compilation made this nmethod
 114   int _comp_level;                           // compilation level
 115 
 116   // protected by CodeCache_lock
 117   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
 118 
 119   // used by jvmti to track if an unload event has been posted for this nmethod.
 120   bool _unload_reported;
 121 
 122   // Protected by CompiledMethod_lock
 123   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
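
Note on _state: the field records where the nmethod is in its life cycle, and this revision moves its guard from Patching_lock to CompiledMethod_lock. The following plain C++ sketch (not HotSpot code; NMethodState, LifeCycle and try_transition are hypothetical names, and the numeric ordering of the states is illustrative) shows the general pattern of serializing life-cycle transitions under a single lock while leaving reads unlocked, as get_state() below does:

#include <mutex>

enum class NMethodState : signed char {
  not_installed, in_use, not_entrant, zombie, unloaded
};

class LifeCycle {
  std::mutex _lock;                 // stands in for CompiledMethod_lock
  volatile signed char _state;

 public:
  LifeCycle() : _state(static_cast<signed char>(NMethodState::not_installed)) {}

  // Serialize every transition under _lock and only allow the state to
  // move forward, so racing callers cannot resurrect a dead nmethod.
  bool try_transition(NMethodState new_state) {
    std::lock_guard<std::mutex> guard(_lock);
    signed char next = static_cast<signed char>(new_state);
    if (next <= _state) {
      return false;                 // already at or beyond the requested state
    }
    _state = next;
    return true;
  }

  signed char get_state() const { return _state; }  // lock-free read, as in the header
};
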
 124 
 125 #ifdef ASSERT
 126   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 127 #endif
 128 
 129 #if INCLUDE_RTM_OPT
 130   // RTM state at compile time. Used during deoptimization to decide
 131   // whether to restart collecting RTM locking abort statistics.
 132   RTMState _rtm_state;
 133 #endif
 134 
 135   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 136   // and is not made into a zombie. However, once the nmethod is made into
 137   // a zombie, it will be locked one final time if CompiledMethodUnload
 138   // event processing needs to be done.
 139   volatile jint _lock_count;
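
The _lock_count field behaves as a counting flushing lock: as long as it is non-zero, the sweeper must leave the nmethod alone. A minimal plain C++ sketch of that idea, roughly analogous in spirit to HotSpot's RAII nmethod locker but not the HotSpot implementation (SketchNMethod and FlushGuard are hypothetical names):

#include <atomic>

struct SketchNMethod {
  std::atomic<int> lock_count{0};   // stands in for _lock_count
  bool can_flush() const { return lock_count.load() == 0; }
};

class FlushGuard {
  SketchNMethod* _nm;
 public:
  explicit FlushGuard(SketchNMethod* nm) : _nm(nm) { _nm->lock_count.fetch_add(1); }
  ~FlushGuard() { _nm->lock_count.fetch_sub(1); }
};

// A sweeper-style loop would check can_flush() and skip any guarded nmethod.
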
 140 
 141   // not_entrant method removal. Each mark_sweep pass will update
 142   // this mark to current sweep invocation count if it is seen on the


 369   // used by jvmti to track if the unload event has been reported
 370   bool  unload_reported()                         { return _unload_reported; }
 371   void  set_unload_reported()                     { _unload_reported = true; }
 372 
 373   int get_state() const {
 374     return _state;
 375   }
 376 
 377   void  make_unloaded();
 378 
 379   bool has_dependencies()                         { return dependencies_size() != 0; }
 380   void flush_dependencies(bool delete_immediately);
 381   bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
 382   void set_has_flushed_dependencies()             {
 383     assert(!has_flushed_dependencies(), "should only happen once");
 384     _has_flushed_dependencies = 1;
 385   }
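
set_has_flushed_dependencies() is a set-once operation: the assert guarantees the flag only ever flips from false to true, and the field comment above says it is maintained under CodeCache_lock. A small plain C++ sketch of that set-once-under-a-lock pattern (DependencyFlags is a hypothetical name):

#include <cassert>
#include <mutex>

class DependencyFlags {
  std::mutex _code_cache_lock;      // stands in for CodeCache_lock
  bool _has_flushed = false;

 public:
  bool has_flushed() const { return _has_flushed; }

  void set_has_flushed() {
    std::lock_guard<std::mutex> guard(_code_cache_lock);
    assert(!_has_flushed && "should only happen once");
    _has_flushed = true;
  }
};
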
 386 
 387   int   comp_level() const                        { return _comp_level; }
 388 
 389   void unlink_from_method();
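
This is one of the two changes in the hunk: the earlier declaration, void unlink_from_method(bool acquire_lock), let callers say whether locking was still needed, while the new declaration takes no argument, which suggests the locking decision now lives inside the implementation. A hedged plain C++ sketch of that refactoring pattern (Registry, unlink_old and unlink are hypothetical names, and the internal-locking reading is an assumption, not something stated on this page):

#include <mutex>
#include <unordered_set>

class Registry {
  std::mutex _lock;                 // stands in for CompiledMethod_lock
  std::unordered_set<const void*> _methods;

 public:
  // Old shape: the caller decides whether the lock still needs to be taken.
  void unlink_old(const void* nm, bool acquire_lock) {
    if (acquire_lock) {
      std::lock_guard<std::mutex> guard(_lock);
      _methods.erase(nm);
    } else {
      _methods.erase(nm);           // caller promises it already holds _lock
    }
  }

  // New shape: locking is unconditional and internal, so no caller can get it wrong.
  void unlink(const void* nm) {
    std::lock_guard<std::mutex> guard(_lock);
    _methods.erase(nm);
  }
};
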
 390 
 391   // Support for oops in scopes and relocs:
 392   // Note: index 0 is reserved for null.
 393   oop   oop_at(int index) const;
 394   oop*  oop_addr_at(int index) const {  // for GC
 395     // relocation indexes are biased by 1 (because 0 is reserved)
 396     assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
 397     assert(!_oops_are_stale, "oops are stale");
 398     return &oops_begin()[index - 1];
 399   }
 400 
 401   // Support for meta data in scopes and relocs:
 402   // Note: index 0 is reserved for null.
 403   Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
 404   Metadata**  metadata_addr_at(int index) const {  // for GC
 405     // relocation indexes are biased by 1 (because 0 is reserved)
 406     assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
 407     return &metadata_begin()[index - 1];
 408   }
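
Both accessors use 1-biased indexes: index 0 is reserved for null, so element i of the oops or metadata section is addressed with index i + 1, and the asserts reject 0 and out-of-range values. A self-contained plain C++ sketch of that convention (BiasedTable is a hypothetical name, not the HotSpot section layout):

#include <cassert>
#include <vector>

template <typename T>
class BiasedTable {
  std::vector<T*> _slots;           // slot i holds the entry for index i + 1

 public:
  int add(T* value) {               // returns a 1-biased index; first entry is 1
    _slots.push_back(value);
    return static_cast<int>(_slots.size());
  }

  T* at(int index) const {          // index 0 is reserved for null
    if (index == 0) return nullptr;
    assert(index > 0 && index <= static_cast<int>(_slots.size()) &&
           "must be a valid non-zero index");
    return _slots[index - 1];
  }
};
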
 409 

