/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // Shared fields for all nmethods
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // A weak reference to an InstalledCode object associated with
  // this nmethod.
  jweak     _jvmci_installed_code;

  // A weak reference to a SpeculationLog object associated with
  // this nmethod.
  jweak     _speculation_log;

  // Determines whether this nmethod is unloaded when the
  // referent in _jvmci_installed_code is cleared. This
  // will be false if the referent is initialized to a
  // HotSpotNMethod object whose isDefault field is true.
  // That is, installed code other than a "default"
  // HotSpotNMethod causes nmethod unloading.
  // This field is ignored once _jvmci_installed_code is NULL.
  bool _jvmci_installed_code_triggers_invalidation;
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _verified_value_entry_point;       // value type entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int  _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;
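
  // Illustrative sketch: each offset above is relative to header_begin(), and
  // consecutive offsets delimit one section of the nmethod, matching the
  // accessors further down, e.g.
  //   handler_table_begin() == header_begin() + _handler_table_offset
  //   handler_table_end()   == header_begin() + _nul_chk_table_offset
  // so a section's size is simply the difference of two neighbouring offsets
  // (see dependencies_size() below).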

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset from sp) where deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
#endif

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;
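
  // For example (illustrative): if _stack_traversal_mark is 7 and the current
  // sweep traversal index has advanced to 9, the method was not seen on any
  // stack during the later scans, so a not_entrant nmethod has no remaining
  // activations and may be removed.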

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
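
  // Worked example (illustrative, with a hypothetical ReservedCodeCacheSize
  // of 240 MB): the counter is reset to
  //   (240 * 1024 * 1024) / (1024 * 1024) * 2 == 480
  // whenever the method is seen active during stack scanning, and then decays
  // by 1 on every sweep until it is reset again.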

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , jweak installed_code,
          jweak speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const                  { return content_begin() - header_begin(); }
  int data_offset() const                     { return _data_offset; }

  address header_end() const                  { return (address)    header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , jweak installed_code = NULL,
                              jweak speculation_log = NULL
#endif
  );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           code_begin()                           ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }

  Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
  Metadata** metadata_end     () const            { return (Metadata**)  _scopes_data_begin; }

  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }

  // Sizes
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }

  int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size        () const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }

  // entry points
  address entry_point() const                     { return _entry_point;             } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
  address verified_value_entry_point() const      { return _verified_value_entry_point; } // pass value type args as oops

  // flag accessing and manipulation
  bool  is_not_installed() const                  { return _state == not_installed; }
  bool  is_in_use() const                         { return _state <= in_use; }
  bool  is_alive() const                          { return _state < zombie; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded; }
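
  // Note (illustrative): is_in_use() and is_alive() rely on the numeric
  // ordering of the state values listed in the comment next to _state; for
  // example, is_alive() is true for every state ordered before zombie.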

  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                     { return _rtm_state; }
  void set_rtm_state(RTMState state)              { _rtm_state = state; }
#endif

  void make_in_use()                              { _state = in_use; }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool  make_not_used()    { return make_not_entrant(); }
  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void  make_unloaded();

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
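
  // Worked example (illustrative): because relocation indexes are biased by 1,
  // index 1 refers to the first table slot, so metadata_at(1) yields
  // metadata_begin()[0] and oop_addr_at(1) == &oops_begin()[0], while index 0
  // always denotes null (metadata_at(0) returns NULL).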

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Scavengable oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
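
  // Illustrative note: _scavenge_root_state is a small bit set; sl_on_list
  // (0x01) records membership in the scavenge root list, while sl_marked
  // (0x10) is a non-product marker used only by the assertion-checking and
  // pruning logic above, so scavenge_root_not_marked() holds whenever no bit
  // other than sl_on_list is set.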

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // Verify that calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink and deallocate this nmethod. Only the NMethodSweeper class is
  // expected to use this; it is not expected to use any other private
  // methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See the comment at the definition of _stack_traversal_mark.
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the InstalledCode object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop jvmci_installed_code();

  // Copies the value of the name field in the InstalledCode
  // object (if any) associated with this nmethod into buf.
  // Returns the value of buf if it was updated, otherwise NULL.
  char* jvmci_installed_code_name(char* buf, size_t buflen) const;

  // Updates the state of the InstalledCode (if any) associated with
  // this nmethod based on the current value of _state.
  void maybe_invalidate_installed_code();

  // Deoptimizes the nmethod (if any) in the address field of a given
  // InstalledCode object. The address field is zeroed upon return.
  static void invalidate_installed_code(Handle installed_code, TRAPS);

  // Gets the SpeculationLog object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop speculation_log();

 private:
  // Deletes the weak reference (if any) to the InstalledCode object
  // associated with this nmethod.
  void clear_jvmci_installed_code();

  // Deletes the weak reference (if any) to the SpeculationLog object
  // associated with this nmethod.
  void clear_speculation_log();

 public:
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_recorded_oops()                      PRODUCT_RETURN;
  void print_recorded_metadata()                  PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // Need to re-define this from CodeBlob, else the overload hides it.
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // Is it OK to patch at this address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                       { return offset_of(nmethod, _state); }

  virtual void metadata_do(void f(Metadata*));

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
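//
// Illustrative use (a sketch; pc is a hypothetical address inside compiled code):
//
//   {
//     nmethodLocker nl(pc);   // derive the nmethod from pc and lock it
//     // ... inspect the nmethod; it cannot be flushed or zombified here ...
//   }                         // the destructor unlocks it again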
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod *nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm);   // note:  This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};

#endif // SHARE_VM_CODE_NMETHOD_HPP