/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()                { return _exception_type; }
  klassOop* exception_type_addr()           { return &_exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};
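
// Illustrative sketch (not part of the VM sources): each cache entry holds
// one exception type plus up to cache_size pc/handler pairs, and entries are
// chained through next().  A lookup over the chain might read as follows,
// assuming match() returns NULL on a miss, as its use elsewhere suggests:
//
//   address lookup_handler(ExceptionCache* head, Handle exception, address pc) {
//     for (ExceptionCache* ec = head; ec != NULL; ec = ec->next()) {
//       address handler = ec->match(exception, pc);
//       if (handler != NULL)  return handler;  // cache hit
//     }
//     return NULL;  // fall back to the slow exception-handler search
//   }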


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};
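
// Illustrative use (a sketch, not the VM's actual caller): consult the small
// cache before paying for a search over the full PcDesc array.  slow_search
// is a hypothetical stand-in for the uncached lookup.
//
//   PcDesc* lookup(PcDescCache& cache, int pc_offset, bool approximate) {
//     PcDesc* pd = cache.find_pc_desc(pc_offset, approximate);
//     if (pd != NULL)  return pd;               // one of the last few hits
//     pd = slow_search(pc_offset, approximate); // hypothetical slow path
//     if (pd != NULL)  cache.add_pc_desc(pd);   // remember for next time
//     return pd;
//   }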


// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
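//
// The parts are laid out consecutively, so each part extends from its own
// offset up to the offset of the next part; the *_begin()/*_end() accessors
// in nmethod below resolve these to half-open intervals, e.g. (illustrative,
// using the private offset fields directly):
//
//   address stub_begin = header_begin() + _stub_offset;  // this part...
//   address stub_end   = header_begin() + _oops_offset;  // ...ends where the next begins
//   bool in_stub = (stub_begin <= addr && addr < stub_end);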

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethods
  methodOop _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler, if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // Location in frame (offset from sp) where deopt can store the
  // original pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
  bool _speculatively_disconnected;          // Marked for potential unload

  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;           // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method contain MethodHandle invokes?

  // Protected by Patching_lock
  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  enum { alive        = 0,
         not_entrant  = 1, // uncommon trap has happened but activations may still exist
         zombie       = 2,
         unloaded     = 3 };
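
  // Sketch of the lifecycle implied by the queries below (illustrative):
  // an alive nmethod may become not_entrant (uncommon trap), a not_entrant
  // one may become a zombie once no activations remain, and unloaded is the
  // terminal state reached via make_unloaded().
  //
  //   if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
  //     nm->make_zombie();  // returns false if another thread raced us here
  //   }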


  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if the method is seen
  // on the stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;
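
  // Illustrative form of the sweeper-side test this supports (a sketch,
  // not the sweeper's actual code):
  //
  //   bool may_reclaim(nmethod* nm, long current_traversal) {
  //     return nm->is_not_entrant() &&
  //            nm->stack_traversal_mark() < current_traversal;
  //   }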

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is
  // concerned, which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const                        { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const                     { return _has_debug_info; }
  void set_has_debug_info(bool f)                 { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           header_begin() +  code_offset()        ; }
  address insts_begin           () const          { return           header_begin() +  code_offset()        ; }
  address insts_end             () const          { return           header_begin() + _stub_offset          ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _scopes_data_offset)  ; }

  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }

  // Sizes
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;          } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const                         { return _state == alive; }
  bool  is_alive() const                          { return _state == alive || _state == not_entrant; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = true;
  }

  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
  void  mark_for_reclamation()                    { _marked_for_reclamation = true; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
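
  // Example of the 1-biased indexing (illustrative values): with three
  // embedded oops, oop_at(0) is the reserved NULL entry, oop_at(1) reads
  // oops_begin()[0], and oop_at(3) reads oops_begin()[2].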

  void copy_oops(GrowableArray<jobject>* oops);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Non-perm oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif // PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is
  // not expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If this returns true, it is unsafe to remove this nmethod even though it
  // is a zombie nmethod, since the VM might have a reference to it. Should
  // only be called from a safepoint.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);
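
  // Illustrative flow (a sketch with a hypothetical patch_frame_for_deopt
  // helper; the real patching lives elsewhere in the VM): the frame's
  // current pc is stashed via set_original_pc(), and the return address is
  // redirected into the deopt handler, after which is_deopt_pc() holds for
  // the patched pc.
  //
  //   void patch_frame_for_deopt(nmethod* nm, frame* fr, address* return_addr_slot) {
  //     nm->set_original_pc(fr, *return_addr_slot);    // remember where to resume
  //     *return_addr_slot = nm->deopt_handler_begin(); // enter the deopt handler
  //     assert(nm->is_deopt_pc(*return_addr_slot), "sanity");
  //   }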

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // print compilation helper
  static void print_compilation(outputStream *st, const char *method_name, const char *title,
                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin);

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: this works even if _nm == new_nm
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
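
// Typical RAII use (an illustrative sketch, not lifted from the VM sources):
// keep an nmethod from being flushed while it is being inspected; examine()
// is a hypothetical helper.
//
//   {
//     nmethodLocker nml(nm);        // bumps the nmethod's _lock_count
//     examine(nm->insts_begin(), nm->insts_end());
//   }                               // destructor unlocks, allowing removal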