src/share/vm/code/compiledMethod.hpp

--- old/src/share/vm/code/compiledMethod.hpp





  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
  26 #define SHARE_VM_CODE_COMPILEDMETHOD_HPP
  27 
  28 #include "code/codeBlob.hpp"
  29 #include "code/pcDesc.hpp"
  30 #include "oops/metadata.hpp"
  31 
  32 class Dependencies;
  33 class ExceptionHandlerTable;
  34 class ImplicitExceptionTable;
  35 class AbstractCompiler;
  36 class xmlStream;
  37 class CompiledStaticCall;

  38 
  39 // This class is used internally by nmethods, to cache
  40 // exception/pc/handler information.
  41 
  42 class ExceptionCache : public CHeapObj<mtCode> {
  43   friend class VMStructs;
  44  private:
  45   enum { cache_size = 16 };
  46   Klass*   _exception_type;
  47   address  _pc[cache_size];
  48   address  _handler[cache_size];
  49   volatile int _count;
  50   ExceptionCache* _next;
  51 
  52   address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  53   void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  54   address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  55   void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  56   int     count()                              { return OrderAccess::load_acquire(&_count); }
  57   // increment_count is only called under lock, but there may be concurrent readers.


 317   // implicit exceptions support
 318   virtual address continuation_for_implicit_exception(address pc) { return NULL; }
 319 
 320   static address get_deopt_original_pc(const frame* fr);
 321 
 322   // Inline cache support
 323   void cleanup_inline_caches(bool clean_all = false);
 324   virtual void clear_inline_caches();
 325   void clear_ic_stubs();
 326 
 327   // Verify and count cached icholder relocations.
 328   int  verify_icholder_relocations();
 329   void verify_oop_relocations();
 330 
 331   virtual bool is_evol_dependent_on(Klass* dependee) = 0;
 332   // Fast breakpoint support. Tells if this compiled method is
 333   // dependent on the given method. Returns true if this nmethod
 334   // corresponds to the given method as well.
 335   virtual bool is_dependent_on_method(Method* dependee) = 0;
 336 
 337   Method* attached_method(address call_pc);
 338   Method* attached_method_before_pc(address pc);
 339 
 340   virtual void metadata_do(void f(Metadata*)) = 0;
 341 
 342   // GC support
 343 
 344   void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
 345   CompiledMethod* unloading_next()              { return _unloading_next; }
 346 
 347   void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);
 348 
 349   // Check that all metadata is still alive
 350   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 351 
 352   virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
 353   //  The parallel versions are used by G1.
 354   virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
 355   virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
 356 


+++ new/src/share/vm/code/compiledMethod.hpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
  26 #define SHARE_VM_CODE_COMPILEDMETHOD_HPP
  27 
  28 #include "code/codeBlob.hpp"
  29 #include "code/pcDesc.hpp"
  30 #include "oops/metadata.hpp"
  31 
  32 class Dependencies;
  33 class ExceptionHandlerTable;
  34 class ImplicitExceptionTable;
  35 class AbstractCompiler;
  36 class xmlStream;
  37 class CompiledStaticCall;
  38 class NativeCallWrapper;
  39 
  40 // This class is used internally by nmethods, to cache
  41 // exception/pc/handler information.
  42 
  43 class ExceptionCache : public CHeapObj<mtCode> {
  44   friend class VMStructs;
  45  private:
  46   enum { cache_size = 16 };
  47   Klass*   _exception_type;
  48   address  _pc[cache_size];
  49   address  _handler[cache_size];
  50   volatile int _count;
  51   ExceptionCache* _next;
  52 
  53   address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  54   void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  55   address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  56   void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  57   int     count()                              { return OrderAccess::load_acquire(&_count); }
  58   // increment_count is only called under lock, but there may be concurrent readers.
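The load_acquire on _count above pairs with a release on the writer side: a new pc/handler slot is written first and only then is _count bumped, so a reader that acquires a given count never sees an uninitialized entry. A minimal sketch of the writer path, assuming HotSpot's OrderAccess::release_store; increment_count is the member named in the comment, while add_entry is a hypothetical helper shown only to illustrate the ordering:

  // Illustrative writer side (sketch, not part of the hunk shown above).
  void increment_count() { OrderAccess::release_store(&_count, count() + 1); }

  // Hypothetical helper: fill the slot before publishing it, so concurrent
  // readers never observe a count larger than the number of initialized slots.
  bool add_entry(address pc, address handler) {
    int index = count();
    if (index >= cache_size) return false;    // cache is full
    set_pc_at(index, pc);
    set_handler_at(index, handler);
    increment_count();                        // release-publish the new entry
    return true;
  }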


 318   // implicit exceptions support
 319   virtual address continuation_for_implicit_exception(address pc) { return NULL; }
 320 
 321   static address get_deopt_original_pc(const frame* fr);
 322 
 323   // Inline cache support
 324   void cleanup_inline_caches(bool clean_all = false);
 325   virtual void clear_inline_caches();
 326   void clear_ic_stubs();
 327 
 328   // Verify and count cached icholder relocations.
 329   int  verify_icholder_relocations();
 330   void verify_oop_relocations();
 331 
 332   virtual bool is_evol_dependent_on(Klass* dependee) = 0;
 333   // Fast breakpoint support. Tells if this compiled method is
 334   // dependent on the given method. Returns true if this nmethod
 335   // corresponds to the given method as well.
 336   virtual bool is_dependent_on_method(Method* dependee) = 0;
 337 
 338   virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
 339   virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
 340   virtual address call_instruction_address(address pc) const = 0;
 341 
 342   virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
 343   virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
 344   virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;
 345 
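The six pure virtuals above are the abstraction point introduced by this change: CompiledMethod no longer assumes one particular call-site or static-call encoding, and each concrete kind of compiled method supplies its own wrappers. A hypothetical subclass sketch (the class name is made up; in the real change the overrides live in the concrete subclasses such as nmethod):

  // Illustrative only -- shows the shape of a concrete CompiledMethod subclass.
  class MyCompiledMethod : public CompiledMethod {
   public:
    // Wrap the call instruction at 'call' (or the one returning to 'return_pc')
    // in a format-specific NativeCallWrapper, so shared CompiledMethod code
    // never touches the raw instruction encoding directly.
    virtual NativeCallWrapper* call_wrapper_at(address call) const;
    virtual NativeCallWrapper* call_wrapper_before(address return_pc) const;
    virtual address call_instruction_address(address pc) const;

    // Resolve a relocation or raw address to whatever CompiledStaticCall
    // representation this kind of compiled method uses.
    virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
    virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
    virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;

    // ... remaining pure virtuals of CompiledMethod elided ...
  };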
 346   Method* attached_method(address call_pc);
 347   Method* attached_method_before_pc(address pc);
 348 
 349   virtual void metadata_do(void f(Metadata*)) = 0;
 350 
 351   // GC support
 352 
 353   void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
 354   CompiledMethod* unloading_next()              { return _unloading_next; }
 355 
 356   void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);
 357 
 358   // Check that all metadata is still alive
 359   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 360 
 361   virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
 362   //  The parallel versions are used by G1.
 363   virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
 364   virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
 365 
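On the G1 note above: a rough sketch of how a worker-driven caller would use the two parallel entry points, do_unloading_parallel for the first pass over each compiled method and do_unloading_parallel_postponed for work that must wait until every worker has finished that pass. The driver below is illustrative only (the real one is G1's code-cache unloading task); it assumes do_unloading_parallel returns true when the method still needs a postponed pass:

  // Hypothetical two-phase driver, run by each GC worker over its share of
  // compiled methods.
  void unload_worker_share(GrowableArray<CompiledMethod*>* methods,
                           BoolObjectClosure* is_alive,
                           bool unloading_occurred) {
    ResourceMark rm;                          // postponed list is resource-allocated
    GrowableArray<CompiledMethod*> postponed;
    for (int i = 0; i < methods->length(); i++) {
      CompiledMethod* cm = methods->at(i);
      if (cm->do_unloading_parallel(is_alive, unloading_occurred)) {
        postponed.append(cm);                 // needs the postponed pass
      }
    }
    // ... synchronization barrier: all workers complete the first pass ...
    for (int i = 0; i < postponed.length(); i++) {
      postponed.at(i)->do_unloading_parallel_postponed(is_alive, unloading_occurred);
    }
  }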

