/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/nativeInst.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return OrderAccess::load_acquire(&_count); }
  // increment_count is only called under lock, but there may be concurrent readers.
  void    increment_count()                    { OrderAccess::release_store(&_count, _count + 1); }
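  // Note: the release_store() here pairs with the load_acquire() in count(),
  // so a reader that observes the new count also observes the pc/handler
  // entries that were filled in before the count was bumped.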

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
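
// A rough sketch (not the actual implementation, which lives in the .cpp
// file) of how the cache chain hanging off CompiledMethod::_exception_cache
// is probed for a handler:
//
//   for (ExceptionCache* ec = exception_cache(); ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);  // NULL unless both the
//     if (handler != NULL) return handler;         // exception type and pc hit
//   }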

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, the C++
  // compiler (namely xlC12) may duplicate field accesses, which has
  // caused find_pc_desc_internal to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
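  // Slot 0 holds the most recently recorded PcDesc (see add_pc_desc() in the
  // .cpp file), so this is a cheap "last hit" shortcut for callers.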
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

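// Bundles the code start address and the [begin, end) bounds of an nmethod's
// PcDesc array, so PcDescContainer can search without knowing which
// CompiledMethod subclass owns the data.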
class PcDescSearch {
private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const { return _upper; }
};

class PcDescContainer VALUE_OBJ_CLASS_SPEC {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

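  // Fast path: if the most recently cached PcDesc already describes this pc,
  // return it directly; otherwise fall back to find_pc_desc_internal().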
  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Have to use far call instructions to call it from code in CodeCache.
  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method*   _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by
  // this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache * volatile _exception_cache;

  virtual void flush() = 0;
protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

public:
  virtual bool is_compiled() const                { return true; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  enum { in_use       = 0,   // executable nmethod
         not_used     = 1,   // not entrant, but revivable
         not_entrant  = 2,   // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie       = 3,   // no activations exist, nmethod is ready for purge
         unloaded     = 4    // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
  };

  virtual bool  is_in_use() const = 0;
  virtual int   comp_level() const = 0;
  virtual int   compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool  is_marked_for_deoptimization() const      { return _mark_for_deoptimization_status != not_marked; }
  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void    set_original_pc(const frame* fr, address pc) = 0;

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  bool is_deopt_entry(address pc) {
    return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
      // When using JVMCI the address might be off by the size of a call instruction.
      || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
#endif
      ;
  }

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  virtual address continuation_for_implicit_exception(address pc) { return NULL; }

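  // If the frame's pc is at a deopt entry of its CompiledMethod, return the
  // original pc that was saved in the frame when it was patched for
  // deoptimization; otherwise return NULL.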
  static address get_deopt_original_pc(const frame* fr) {
    if (fr->cb() == NULL)  return NULL;

    CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
    if (cm != NULL && cm->is_deopt_pc(fr->pc()))
      return cm->get_original_pc(fr);

    return NULL;
  }

  // Inline cache support
  void cleanup_inline_caches(bool clean_all = false);
  virtual void clear_inline_caches();
  void clear_ic_stubs();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  virtual bool is_evol_dependent_on(Klass* dependee) = 0;
  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(void f(Metadata*)) = 0;

  // GC support

  void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
  CompiledMethod* unloading_next()              { return _unloading_next; }

  static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);

  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

protected:
  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#if INCLUDE_JVMCI
  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#endif

private:
  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

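  // Delegates to the PcDescContainer, supplying this method's code bounds
  // via a PcDescSearch.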
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }

protected:
  union {
    // Used by G1 to chain nmethods.
    CompiledMethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };
};

#endif // SHARE_VM_CODE_COMPILEDMETHOD_HPP