/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
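
// Illustrative sketch (not part of this interface; the real lookup lives in
// nmethod::handler_for_exception_and_pc): a caller walks the chain of caches
// and lets each node match the thrown exception's type and pc, e.g.
//
//   for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);  // NULL on a miss
//     if (handler != NULL) return handler;
//   }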


// Cache of PcDescs found in earlier inquiries.
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify and
  // read from the cache concurrently. Without volatile, a C++ compiler
  // (namely xlC12) may duplicate field accesses, and find_pc_desc_internal
  // has been observed to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
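
// A minimal usage sketch (hypothetical values; in practice the cache is
// maintained by nmethod::find_pc_desc and friends). New entries land in
// slot 0, so last_pc_desc() returns the most recently found descriptor:
//
//   cache.reset_to(scopes_pcs_begin());   // seed the cache
//   cache.add_pc_desc(d);                 // d becomes last_pc_desc()
//   PcDesc* hit = cache.find_pc_desc(pc_offset, /*approximate*/ true);
//   // hit == NULL means none of the cached entries matched; fall back
//   // to the full search over the pcs section.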


// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
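//
// Each part is located via a byte offset from header_begin(); for example,
// as the boundary accessors below spell out,
//   insts_begin() == header_begin() + code_offset()
//   insts_end()   == header_begin() + _stub_offset
// and insts_size() is simply the difference of the two addresses.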

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Entry point addresses
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler, if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded metadata table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // Location in frame (offset from sp) where deopt can store the
  // original pc during a deopt.
  int _orig_pc_offset;


  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;           // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif


  enum { in_use       = 0,   // executable nmethod
         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie       = 2,   // no activations exist, nmethod is ready for purge
         unloaded     = 3 }; // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
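
  // Typical life cycle (a sketch; the transitions themselves are performed
  // under Patching_lock by make_not_entrant_or_zombie(), declared below):
  //   in_use -> not_entrant -> zombie -> flushed by the sweeper
  //   in_use -> unloaded -> zombie      (when GC finds the nmethod's roots dead)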

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if the method is seen
  // on the stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
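
  // Worked example: with ReservedCodeCacheSize = 256M the counter is reset
  // to (256 * 2) = 512 whenever the method is seen on a stack scan, so an
  // nmethod that then stays cold survives roughly 512 sweeps before its
  // counter reaches zero.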

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  Method* method() const                          { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           header_begin() +  code_offset()        ; }
  address insts_begin           () const          { return           header_begin() +  code_offset()        ; }
  address insts_end             () const          { return           header_begin() + _stub_offset          ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }

  Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
  Metadata** metadata_end     () const            { return (Metadata**)  (header_begin() + _scopes_data_offset)  ; }

  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }

  // Sizes
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;          } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const                         { return _state == in_use; }
  bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                     { return _rtm_state; }
  void set_rtm_state(RTMState state)              { _rtm_state = state; }
#endif

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
  nmethod* unloading_next()                       { return _unloading_next; }

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
  void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
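
  // For example, oop_at(1) reads oops_begin()[0], and oop_at(0) yields NULL
  // without touching the table; metadata_at/metadata_addr_at below follow
  // exactly the same 1-based convention.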

  // Support for metadata in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*   metadata_at(int index) const        { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See the comment at the definition of _stack_traversal_mark above
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  //  The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
  //  Unload an nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
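    // Fast path: reuse the most recently found PcDesc when it matches this
    // pc exactly, avoiding the full search in find_pc_desc_internal.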
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one that would be expected if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

  // RedefineClasses support. Mark metadata in nmethods as on_stack so that
  // RedefineClasses doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: this works even if _nm == new_nm
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
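
// Illustrative RAII usage (a sketch; actual call sites live elsewhere in
// the VM). The lock pins the nmethod across a region where its code must
// stay valid:
//
//   {
//     nmethodLocker nml(nm);      // bumps _lock_count via lock_nmethod()
//     // ... the nmethod cannot be flushed or zombified here ...
//   }                             // destructor calls unlock_nmethod()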

#endif // SHARE_VM_CODE_NMETHOD_HPP