/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()                { return _exception_type; }
  klassOop* exception_type_addr()           { return &_exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};
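
// Illustrative sketch (not the implementation): each ExceptionCache entry
// caches up to cache_size (pc, handler) pairs for a single exception type,
// and match() behaves roughly like the following:
//
//   address ExceptionCache::match(Handle exception, address pc) {
//     if (exception->klass() == exception_type()) {
//       for (int i = 0; i < count(); i++) {
//         if (pc_at(i) == pc) return handler_at(i);   // cache hit
//       }
//     }
//     return NULL;   // miss; the caller falls back to the handler table
//   }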


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};
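
// Illustrative sketch (not the actual implementation): nmethod::find_pc_desc()
// consults this small cache before falling back to the slower search over the
// nmethod's scopes_pcs table. Conceptually:
//
//   PcDesc* desc = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
//   if (desc == NULL) {
//     desc = /* search scopes_pcs_begin()..scopes_pcs_end() */;
//     _pc_desc_cache.add_pc_desc(desc);   // remember for later queries
//   }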


// nmethods (native methods) are the compiled code versions of Java methods.

struct nmFlags {
  friend class VMStructs;
  unsigned int version:8;                    // version number (0 = first version)
  unsigned int level:4;                      // optimization level
  unsigned int age:4;                        // age (in # of sweep steps)

  unsigned int state:2;                      // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1;       // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1;           // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1;     // Used for maintenance of dependencies
  unsigned int markedForReclamation:1;       // Used by NMethodSweeper

  unsigned int has_unsafe_access:1;          // May fault due to unsafe access.
  unsigned int has_method_handle_invokes:1;  // Has this method MethodHandle invokes?

  unsigned int speculatively_disconnected:1; // Marked for potential unload

  void clear();
};


// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethods
  static int _zombie_instruction_size;

  methodOop _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler, if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _stub_offset;
  int _consts_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                     // which compilation made this nmethod
  int _comp_level;                     // compilation level

  // offsets for entry points
  address _entry_point;                // entry point with class check
  address _verified_entry_point;       // entry point without class check
  address _osr_entry_point;            // entry point for on stack replacement

  nmFlags flags;           // various flags to keep track of nmethod state
  bool _markedForDeoptimization;       // Used for stack deoptimization
  enum { alive        = 0,
         not_entrant  = 1, // uncommon trap has happened but activations may still exist
         zombie       = 2,
         unloaded     = 3 };

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update this
  // mark to the current sweep invocation count if the method is seen on
  // the stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;
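
  // Illustrative sweeper check (a sketch of the rule above, not the
  // sweeper's actual code; `current_traversal` is a hypothetical name for
  // the sweeper's current traversal index):
  //
  //   if (nm->is_not_entrant() &&
  //       nm->stack_traversal_mark() < current_traversal) {
  //     // no activations remain; nm may be converted to a zombie
  //   }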

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;
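
  // Illustrative use (a sketch, not the revocation code itself): given a
  // frame `fr` for this native wrapper, the lock owner can be located by
  // adding the byte offset to the Java stack pointer, roughly:
  //
  //   oop owner = *(oop*)((address)fr.sp() +
  //       in_bytes(nm->compiled_synchronized_native_basic_lock_owner_sp_offset()));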

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look like an
  // nmethod as far as the rest of the system is concerned, which is
  // somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return code_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const                        { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const                     { return _has_debug_info; }
  void set_has_debug_info(bool f)                 { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin            () const          { return _entry_point; }
  address code_end              () const          { return           header_begin() + _stub_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _consts_offset        ; }
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           header_begin() + _oops_offset          ; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _scopes_data_offset)  ; }

  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }

  // Sizes
  int code_size         () const                  { return            code_end         () -            code_begin         (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  // Containment
  bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;             } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const                         { return flags.state == alive; }
  bool  is_alive() const                          { return flags.state == alive || flags.state == not_entrant; }
  bool  is_not_entrant() const                    { return flags.state == not_entrant; }
  bool  is_zombie() const                         { return flags.state == zombie; }
  bool  is_unloaded() const                       { return flags.state == unloaded; }

  // Make the nmethod not entrant. The nmethod will continue to be
  // alive.  This is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
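
  // Usage sketch (illustrative): invalidating a compiled method that may
  // still have activations on some thread's stack. The transition can race
  // with other threads, hence the boolean result:
  //
  //   if (nm->is_alive() && nm->is_marked_for_deoptimization()) {
  //     bool transitioned_here = nm->make_not_entrant();
  //     // false => another thread performed the transition first
  //   }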

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  bool  is_marked_for_deoptimization() const      { return _markedForDeoptimization; }
  void  mark_for_deoptimization()                 { _markedForDeoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool  has_dependencies()                        { return dependencies_size() != 0; }
  void  flush_dependencies(BoolObjectClosure* is_alive);
  bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
  void  set_has_flushed_dependencies()            {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
  void  mark_for_reclamation()                    { check_safepoint(); flags.markedForReclamation = 1; }
  void  unmark_for_reclamation()                  { check_safepoint(); flags.markedForReclamation = 0; }

  bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return flags.has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { flags.has_method_handle_invokes = z; }

  bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
  void  set_speculatively_disconnected(bool z)    { flags.speculatively_disconnected = z; }

  int   level() const                             { return flags.level; }
  void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }

  int   comp_level() const                        { return _comp_level; }

  int   version() const                           { return flags.version; }
  void  set_version(int v);

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }

  void copy_oops(GrowableArray<jobject>* oops);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Non-perm oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif // PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);
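
  // Usage sketch (illustrative, not the dispatch code itself): exception
  // dispatch consults the cache first and installs freshly computed
  // handlers so later throws from the same pc hit the fast path:
  //
  //   address handler = nm->handler_for_exception_and_pc(exception, ret_pc);
  //   if (handler == NULL) {
  //     handler = /* compute via the exception handler table */;
  //     nm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
  //   }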

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this; the sweeper is
  // not expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If this returns true, it is unsafe to remove this nmethod even though
  // it is a zombie nmethod, since the VM might have a reference to it.
  // Should only be called from a safepoint.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See the comment at the definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);
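
  // Illustrative flow (a sketch, not the authoritative deopt code): when a
  // frame is deoptimized, its current pc is saved through set_original_pc()
  // and execution is redirected to deopt_handler_begin(), so that
  // is_deopt_pc() later recognizes the patched frame:
  //
  //   nm->set_original_pc(&fr, fr.pc());   // remember where we were
  //   // ... patch fr's pc to nm->deopt_handler_begin() ...
  //   assert(nm->is_deopt_pc(nm->deopt_handler_begin()), "sanity");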

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin);

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // the methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
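
// Usage sketch (illustrative): hold an nmethod's code alive across a region
// of VM code, RAII-style. `pc` is assumed to be an address known to lie
// inside some nmethod's code:
//
//   {
//     nmethodLocker nml(pc);        // derives the nmethod and locks it
//     nmethod* nm = nml.code();
//     if (nm != NULL) {
//       // nm cannot be flushed while nml is in scope
//     }
//   }                               // destructor unlocks the nmethod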