src/share/vm/code/compiledIC.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/code

src/share/vm/code/compiledIC.hpp

Print this page




  51 //
  52 // The numbers in square brackets refer to the kind of transition:
  53 // [1]: Initial fixup. Receiver is found from debug information
  54 // [2]: Compilation of a method
  55 // [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
  56 // [4]: Inline cache miss. We go directly to megamorphic call.
  57 //
  58 // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
  59 // transition is made to a stub.
  60 //
  61 class CompiledIC;
  62 class ICStub;
  63 
  64 class CompiledICInfo : public StackObj {
  65  private:
  66   address _entry;              // entry point for call
  67   void*   _cached_value;         // Value of cached_value (either in stub or inline cache)
  68   bool    _is_icholder;          // Is the cached value a CompiledICHolder*
  69   bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  70   bool    _to_interpreter;     // Call it to interpreter

  71   bool    _release_icholder;
  72  public:
  73   address entry() const        { return _entry; }
  74   Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  75   CompiledICHolder*    claim_cached_icholder() {
  76     assert(_is_icholder, "");
  77     assert(_cached_value != NULL, "must be non-NULL");
  78     _release_icholder = false;
  79     CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
  80     icholder->claim();
  81     return icholder;
  82   }
  83   bool    is_optimized() const { return _is_optimized; }
  84   bool         to_interpreter() const  { return _to_interpreter; }

  85 
  86   void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
  87     _entry      = entry;
  88     _cached_value = (void*)klass;
  89     _to_interpreter = false;

  90     _is_icholder = false;
  91     _is_optimized = is_optimized;
  92     _release_icholder = false;
  93   }
  94 
  95   void set_interpreter_entry(address entry, Method* method) {
  96     _entry      = entry;
  97     _cached_value = (void*)method;
  98     _to_interpreter = true;











  99     _is_icholder = false;
 100     _is_optimized = true;
 101     _release_icholder = false;
 102   }
 103 
 104   void set_icholder_entry(address entry, CompiledICHolder* icholder) {
 105     _entry      = entry;
 106     _cached_value = (void*)icholder;
 107     _to_interpreter = true;

 108     _is_icholder = true;
 109     _is_optimized = false;
 110     _release_icholder = true;
 111   }
 112 
 113   CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
 114                     _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
 115   }
 116   ~CompiledICInfo() {
 117     // In rare cases the info is computed but not used, so release any
 118     // CompiledICHolder* that was created
 119     if (_release_icholder) {
 120       assert(_is_icholder, "must be");
 121       CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
 122       icholder->claim();
 123       delete icholder;
 124     }
 125   }
 126 };
 127 





















 128 class CompiledIC: public ResourceObj {
 129   friend class InlineCacheBuffer;
 130   friend class ICStub;
 131 
 132 
 133  private:
 134   NativeCall*   _ic_call;       // the call instruction
 135   NativeMovConstReg* _value;    // patchable value cell for this IC
 136   bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

 137 
 138   CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
 139   CompiledIC(RelocIterator* iter);
 140 
 141   void initialize_from_iter(RelocIterator* iter);
 142 
 143   static bool is_icholder_entry(address entry);
 144 
 145   // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
 146 // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
 147   // changes to a transition stub.
 148   void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
 149   void set_ic_destination(ICStub* stub);
 150   void set_ic_destination(address entry_point) {
 151     assert(_is_optimized, "use set_ic_destination_and_value instead");
 152     internal_set_ic_destination(entry_point, false, NULL, false);
 153   }
 154 // This is only for use by ICStubs where the type of the value isn't known
 155   void set_ic_destination_and_value(address entry_point, void* value) {
 156     internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));


 160   }
 161   void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
 162     internal_set_ic_destination(entry_point, false, value, true);
 163   }
 164 
 165   // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
 166   // associated with the inline cache.
 167   address stub_address() const;
 168   bool is_in_transition_state() const;  // Use InlineCacheBuffer
 169 
 170  public:
 171   // conversion (machine PC to CompiledIC*)
 172   friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
 173   friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
 174   friend CompiledIC* CompiledIC_at(Relocation* call_site);
 175   friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
 176 
 177   // This is used to release CompiledICHolder*s from nmethods that
 178   // are about to be freed.  The callsite might contain other stale
 179   // values of other kinds so it must be careful.
 180   static void cleanup_call_site(virtual_call_Relocation* call_site);
 181   static bool is_icholder_call_site(virtual_call_Relocation* call_site);
 182 
 183   // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
 184   // to a transition stub, it will read the values from the transition stub.
 185   void* cached_value() const;
 186   CompiledICHolder* cached_icholder() const {
 187     assert(is_icholder_call(), "must be");
 188     return (CompiledICHolder*) cached_value();
 189   }
 190   Metadata* cached_metadata() const {
 191     assert(!is_icholder_call(), "must be");
 192     return (Metadata*) cached_value();
 193   }
 194 








 195   address ic_destination() const;
 196 
 197   bool is_optimized() const   { return _is_optimized; }
 198 
 199   // State
 200   bool is_clean() const;
 201   bool is_megamorphic() const;
 202   bool is_call_to_compiled() const;
 203   bool is_call_to_interpreted() const;
 204 
 205   bool is_icholder_call() const;
 206 
 207   address end_of_call() { return  _ic_call->return_address(); }
 208 
 209 // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
 210   // so you are guaranteed that no patching takes place. The same goes for verify.
 211   //
 212 // Note: We do not provide any direct access to the stub code, to prevent parts of the code
 213 // from manipulating the inline cache in MT-unsafe ways.
 214   //
 215 // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
 216   //
 217   void set_to_clean(bool in_use = true);
 218   void set_to_monomorphic(CompiledICInfo& info);
 219   void clear_ic_stub();
 220 
 221   // Returns true if successful and false otherwise. The call can fail if memory
 222   // allocation in the code cache fails.
 223   bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 224 
 225   static void compute_monomorphic_entry(const methodHandle& method, KlassHandle receiver_klass,
 226                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

 227 
 228   // Location
 229   address instruction_address() const { return _ic_call->instruction_address(); }
 230 
 231   // Misc
 232   void print()             PRODUCT_RETURN;
 233   void print_compiled_ic() PRODUCT_RETURN;
 234   void verify()            PRODUCT_RETURN;
 235 };
 236 
 237 inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
 238   CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
 239   c_ic->verify();
 240   return c_ic;
 241 }
 242 
 243 inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
 244   CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
 245   c_ic->verify();
 246   return c_ic;
 247 }
 248 
 249 inline CompiledIC* CompiledIC_at(Relocation* call_site) {


 261   c_ic->verify();
 262   return c_ic;
 263 }
 264 
 265 //-----------------------------------------------------------------------------
 266 // The CompiledStaticCall represents a call to a static method in the compiled code.
 267 //
 268 // Transition diagram of a static call site is somewhat simpler than for an inline cache:
 269 //
 270 //
 271 //           -----<----- Clean ----->-----
 272 //          /                             \
 273 //         /                               \
 274 //    compiled code <------------> interpreted code
 275 //
 276 //  Clean:            Calls directly to runtime method for fixup
 277 //  Compiled code:    Calls directly to compiled code
 278 //  Interpreted code: Calls to a stub that sets the Method* reference
 279 //
 280 //
 281 class CompiledStaticCall;
 282 
 283 class StaticCallInfo {
 284  private:
 285   address      _entry;          // Entrypoint
 286   methodHandle _callee;         // Callee (used when calling interpreter)
 287   bool         _to_interpreter; // call to interpreted method (otherwise compiled)

 288 
 289   friend class CompiledStaticCall;


 290  public:
 291   address      entry() const    { return _entry;  }
 292   methodHandle callee() const   { return _callee; }
 293 };
 294 
 295 
 296 class CompiledStaticCall: public NativeCall {
 297   friend class CompiledIC;
 298 
 299   // Also used by CompiledIC
 300   void set_to_interpreted(methodHandle callee, address entry);
 301   bool is_optimized_virtual();
 302 
 303  public:
 304   friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
 305   friend CompiledStaticCall* compiledStaticCall_at(address native_call);
 306   friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
 307 
 308   // Code
 309   static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
 310   static int to_interp_stub_size();
 311   static int reloc_to_interp_stub();



 312 
 313   // State
 314   bool is_clean() const;
 315   bool is_call_to_compiled() const;
 316   bool is_call_to_interpreted() const;


 317 
 318   // Clean static call (will force resolving on next use)
 319   void set_to_clean();
 320 
 321   // Set state. The entry must be the same, as computed by compute_entry.
 322   // Computation and setting is split up, since the actions are separate during
 323   // a OptoRuntime::resolve_xxx.
 324   void set(const StaticCallInfo& info);
 325 
 326   // Compute entry point given a method
 327   static void compute_entry(const methodHandle& m, StaticCallInfo& info);


 328 
 329   // Stub support
 330   address find_stub();
 331   static void set_stub_to_clean(static_stub_Relocation* static_stub);






 332 
 333   // Misc.
 334   void print()  PRODUCT_RETURN;
 335   void verify() PRODUCT_RETURN;
 336 };
 337 




 338 
 339 inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
 340   CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);













 341   st->verify();
 342   return st;
 343 }
 344 
 345 inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
 346   CompiledStaticCall* st = (CompiledStaticCall*)native_call;
 347   st->verify();
 348   return st;
 349 }
 350 
 351 inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
 352   return compiledStaticCall_at(call_site->addr());
 353 }





















 354 
 355 #endif // SHARE_VM_CODE_COMPILEDIC_HPP


  51 //
  52 // The numbers in square brackets refer to the kind of transition:
  53 // [1]: Initial fixup. Receiver is found from debug information
  54 // [2]: Compilation of a method
  55 // [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
  56 // [4]: Inline cache miss. We go directly to megamorphic call.
  57 //
  58 // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
  59 // transition is made to a stub.
  60 //
  61 class CompiledIC;
  62 class ICStub;
  63 
  64 class CompiledICInfo : public StackObj {
  65  private:
  66   address _entry;              // entry point for call
  67   void*   _cached_value;         // Value of cached_value (either in stub or inline cache)
  68   bool    _is_icholder;          // Is the cached value a CompiledICHolder*
  69   bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  70   bool    _to_interpreter;     // Call it to interpreter
  71   bool    _to_aot;             // Call it to aot code
  72   bool    _release_icholder;
  73  public:
  74   address entry() const        { return _entry; }
  75   Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  76   CompiledICHolder*    claim_cached_icholder() {
  77     assert(_is_icholder, "");
  78     assert(_cached_value != NULL, "must be non-NULL");
  79     _release_icholder = false;
  80     CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
  81     icholder->claim();
  82     return icholder;
  83   }
  84   bool    is_optimized() const { return _is_optimized; }
  85   bool  to_interpreter() const { return _to_interpreter; }
  86   bool          to_aot() const { return _to_aot; }
  87 
  88   void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
  89     _entry      = entry;
  90     _cached_value = (void*)klass;
  91     _to_interpreter = false;
  92     _to_aot = false;
  93     _is_icholder = false;
  94     _is_optimized = is_optimized;
  95     _release_icholder = false;
  96   }
  97 
  98   void set_interpreter_entry(address entry, Method* method) {
  99     _entry      = entry;
 100     _cached_value = (void*)method;
 101     _to_interpreter = true;
 102     _to_aot = false;
 103     _is_icholder = false;
 104     _is_optimized = true;
 105     _release_icholder = false;
 106   }
 107 
 108   void set_aot_entry(address entry, Method* method) {
 109     _entry      = entry;
 110     _cached_value = (void*)method;
 111     _to_interpreter = false;
 112     _to_aot = true;
 113     _is_icholder = false;
 114     _is_optimized = true;
 115     _release_icholder = false;
 116   }
 117 
 118   void set_icholder_entry(address entry, CompiledICHolder* icholder) {
 119     _entry      = entry;
 120     _cached_value = (void*)icholder;
 121     _to_interpreter = true;
 122     _to_aot = false;
 123     _is_icholder = true;
 124     _is_optimized = false;
 125     _release_icholder = true;
 126   }
 127 
 128   CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
 129                     _to_interpreter(false), _to_aot(false), _is_optimized(false), _release_icholder(false) {
 130   }
 131   ~CompiledICInfo() {
 132     // In rare cases the info is computed but not used, so release any
 133     // CompiledICHolder* that was created
 134     if (_release_icholder) {
 135       assert(_is_icholder, "must be");
 136       CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
 137       icholder->claim();
 138       delete icholder;
 139     }
 140   }
 141 };
 142 
 143 class NativeCallWrapper: public ResourceObj {
 144 public:
 145   virtual address destination() const = 0;
 146   virtual address instruction_address() const = 0;
 147   virtual address next_instruction_address() const = 0;
 148   virtual address return_address() const = 0;
 149   virtual address get_resolve_call_stub(bool is_optimized) const = 0;
 150   virtual void set_destination_mt_safe(address dest) = 0;
 151   virtual void set_to_interpreted(methodHandle method, CompiledICInfo& info) = 0;
 152   virtual void verify() const = 0;
 153   virtual void verify_resolve_call(address dest) const = 0;
 154 
 155   virtual bool is_call_to_interpreted(address dest) const = 0;
 156   virtual bool is_safe_for_patching() const = 0;
 157 
 158   virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0;
 159 
 160   virtual void *get_data(NativeInstruction* instruction) const = 0;
 161   virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0;
 162 };
 163 
 164 class CompiledIC: public ResourceObj {
 165   friend class InlineCacheBuffer;
 166   friend class ICStub;
 167 

 168  private:
 169   NativeCallWrapper* _call;
 170   NativeInstruction* _value;    // patchable value cell for this IC
 171   bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
 172   CompiledMethod* _method;
 173 
 174   CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
 175   CompiledIC(RelocIterator* iter);
 176 
 177   void initialize_from_iter(RelocIterator* iter);
 178 
 179   static bool is_icholder_entry(address entry);
 180 
 181   // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
 182 // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
 183   // changes to a transition stub.
 184   void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
 185   void set_ic_destination(ICStub* stub);
 186   void set_ic_destination(address entry_point) {
 187     assert(_is_optimized, "use set_ic_destination_and_value instead");
 188     internal_set_ic_destination(entry_point, false, NULL, false);
 189   }
 190 // This is only for use by ICStubs where the type of the value isn't known
 191   void set_ic_destination_and_value(address entry_point, void* value) {
 192     internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));


 196   }
 197   void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
 198     internal_set_ic_destination(entry_point, false, value, true);
 199   }
 200 
 201   // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
 202   // associated with the inline cache.
 203   address stub_address() const;
 204   bool is_in_transition_state() const;  // Use InlineCacheBuffer
 205 
 206  public:
 207   // conversion (machine PC to CompiledIC*)
 208   friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
 209   friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
 210   friend CompiledIC* CompiledIC_at(Relocation* call_site);
 211   friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
 212 
 213   // This is used to release CompiledICHolder*s from nmethods that
 214   // are about to be freed.  The callsite might contain other stale
 215   // values of other kinds so it must be careful.
 216   static void cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);
 217   static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);
 218 
 219   // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
 220   // to a transition stub, it will read the values from the transition stub.
 221   void* cached_value() const;
 222   CompiledICHolder* cached_icholder() const {
 223     assert(is_icholder_call(), "must be");
 224     return (CompiledICHolder*) cached_value();
 225   }
 226   Metadata* cached_metadata() const {
 227     assert(!is_icholder_call(), "must be");
 228     return (Metadata*) cached_value();
 229   }
 230 
 231   void* get_data() const {
 232     return _call->get_data(_value);
 233   }
 234 
 235   void set_data(intptr_t data) {
 236     _call->set_data(_value, data);
 237   }
 238 
 239   address ic_destination() const;
 240 
 241   bool is_optimized() const   { return _is_optimized; }
 242 
 243   // State
 244   bool is_clean() const;
 245   bool is_megamorphic() const;
 246   bool is_call_to_compiled() const;
 247   bool is_call_to_interpreted() const;
 248 
 249   bool is_icholder_call() const;
 250 
 251   address end_of_call() { return  _call->return_address(); }
 252 
 253 // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
 254   // so you are guaranteed that no patching takes place. The same goes for verify.
 255   //
 256 // Note: We do not provide any direct access to the stub code, to prevent parts of the code
 257 // from manipulating the inline cache in MT-unsafe ways.
 258   //
 259 // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
 260   //
 261   void set_to_clean(bool in_use = true);
 262   void set_to_monomorphic(CompiledICInfo& info);
 263   void clear_ic_stub();
 264 
 265   // Returns true if successful and false otherwise. The call can fail if memory
 266   // allocation in the code cache fails.
 267   bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 268 
 269   static void compute_monomorphic_entry(const methodHandle& method, KlassHandle receiver_klass,
 270                                         bool is_optimized, bool static_bound, bool caller_is_nmethod,
 271                                         CompiledICInfo& info, TRAPS);
 272 
 273   // Location
 274   address instruction_address() const { return _call->instruction_address(); }
 275 
 276   // Misc
 277   void print()             PRODUCT_RETURN;
 278   void print_compiled_ic() PRODUCT_RETURN;
 279   void verify()            PRODUCT_RETURN;
 280 };
 281 
 282 inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
 283   CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
 284   c_ic->verify();
 285   return c_ic;
 286 }
 287 
 288 inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
 289   CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
 290   c_ic->verify();
 291   return c_ic;
 292 }
 293 
 294 inline CompiledIC* CompiledIC_at(Relocation* call_site) {


 306   c_ic->verify();
 307   return c_ic;
 308 }
 309 
 310 //-----------------------------------------------------------------------------
 311 // The CompiledStaticCall represents a call to a static method in the compiled code.
 312 //
 313 // Transition diagram of a static call site is somewhat simpler than for an inline cache:
 314 //
 315 //
 316 //           -----<----- Clean ----->-----
 317 //          /                             \
 318 //         /                               \
 319 //    compiled code <------------> interpreted code
 320 //
 321 //  Clean:            Calls directly to runtime method for fixup
 322 //  Compiled code:    Calls directly to compiled code
 323 //  Interpreted code: Calls to a stub that sets the Method* reference
 324 //
 325 //

 326 
 327 class StaticCallInfo {
 328  private:
 329   address      _entry;          // Entrypoint
 330   methodHandle _callee;         // Callee (used when calling interpreter)
 331   bool         _to_interpreter; // call to interpreted method (otherwise compiled)
 332   bool         _to_aot;         // call to aot method (otherwise compiled)
 333 
 334   friend class CompiledStaticCall;
 335   friend class CompiledDirectStaticCall;
 336   friend class CompiledPltStaticCall;
 337  public:
 338   address      entry() const    { return _entry;  }
 339   methodHandle callee() const   { return _callee; }
 340 };
 341 
 342 class CompiledStaticCall : public ResourceObj {







 343  public:




 344   // Code
 345   static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
 346   static int to_interp_stub_size();
 347   static int reloc_to_interp_stub();
 348   static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
 349   static int to_aot_stub_size();
 350   static int reloc_to_aot_stub();
 351 
 352   // Compute entry point given a method
 353   static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);
 354 
 355 public:
 356 // Destination address of the call
 357   virtual address destination() const = 0;
 358 
 359   // Clean static call (will force resolving on next use)
 360   void set_to_clean();
 361 
 362   // Set state. The entry must be the same, as computed by compute_entry.
 363   // Computation and setting is split up, since the actions are separate during
 364   // a OptoRuntime::resolve_xxx.
 365   void set(const StaticCallInfo& info);
 366 
 367   // State
 368   bool is_clean() const;
 369   bool is_call_to_compiled() const;
 370   virtual bool is_call_to_interpreted() const = 0;
 371 
 372   virtual address instruction_address() const = 0;
 373 protected:
 374   virtual address resolve_call_stub() const = 0;
 375   virtual void set_destination_mt_safe(address dest) = 0;
 376 #if INCLUDE_AOT
 377   virtual void set_to_far(methodHandle callee, address entry) = 0;
 378 #endif
 379   virtual void set_to_interpreted(methodHandle callee, address entry) = 0;
 380   virtual const char* name() const = 0;
 381 
 382   void set_to_compiled(address entry);


 383 };
 384 
 385 class CompiledDirectStaticCall : public CompiledStaticCall {
 386 private:
 387   friend class CompiledIC;
 388   friend class DirectNativeCallWrapper;
 389 
 390   // Also used by CompiledIC
 391   void set_to_interpreted(methodHandle callee, address entry);
 392 #if INCLUDE_AOT
 393   void set_to_far(methodHandle callee, address entry);
 394 #endif
 395   address instruction_address() const { return _call->instruction_address(); }
 396   void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
 397 
 398   NativeCall* _call;
 399 
 400   CompiledDirectStaticCall(NativeCall* call) : _call(call) {}
 401 
 402  public:
 403   static inline CompiledDirectStaticCall* before(address return_addr) {
 404     CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr));
 405     st->verify();
 406     return st;
 407   }
 408 
 409   static inline CompiledDirectStaticCall* at(address native_call) {
 410     CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call));
 411     st->verify();
 412     return st;
 413   }
 414 
 415   static inline CompiledDirectStaticCall* at(Relocation* call_site) {
 416     return at(call_site->addr());
 417   }
 418 
 419   // Delegation
 420   address destination() const { return _call->destination(); }
 421 
 422   // State
 423   virtual bool is_call_to_interpreted() const;
 424   bool is_call_to_far() const;
 425 
 426   // Stub support
 427   static address find_stub_for(address instruction, bool is_aot);
 428   address find_stub(bool is_aot);
 429   static void set_stub_to_clean(static_stub_Relocation* static_stub);
 430 
 431   // Misc.
 432   void print()  PRODUCT_RETURN;
 433   void verify() PRODUCT_RETURN;
 434 
 435  protected:
 436   virtual address resolve_call_stub() const;
 437   virtual const char* name() const { return "CompiledDirectStaticCall"; }
 438 };
 439 
 440 #endif // SHARE_VM_CODE_COMPILEDIC_HPP
src/share/vm/code/compiledIC.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File