1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_CODE_COMPILEDIC_HPP 26 #define SHARE_VM_CODE_COMPILEDIC_HPP 27 28 #include "code/nativeInst.hpp" 29 #include "interpreter/linkResolver.hpp" 30 #include "oops/compiledICHolder.hpp" 31 32 //----------------------------------------------------------------------------- 33 // The CompiledIC represents a compiled inline cache. 
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean  -->---  [1]
//        /       (null)      \
//       /                     \      /-<-\
//      /          [2]          \    /     \
//   Interpreted  ---------> Monomorphic     | [3]
//  (CompiledICHolder*)    (Klass*)         |
//         \                   /   \       /
//       [4] \               / [4]  \->-/
//             \->-  Megamorphic  -<-/
//                  (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
class CompiledIC;
class CompiledICProtectionBehaviour;
class CompiledMethod;
class ICStub;

// RAII guard that makes it safe to inspect or patch the inline caches of a
// CompiledMethod. The concrete locking strategy is delegated to the method's
// CompiledICProtectionBehaviour.
class CompiledICLocker: public StackObj {
  CompiledMethod* _method;
  CompiledICProtectionBehaviour* _behaviour;
  bool _locked;

public:
  CompiledICLocker(CompiledMethod* method);
  ~CompiledICLocker();
  // Whether inline-cache modification is currently safe for "method"
  static bool is_safe(CompiledMethod* method);
  // Same query, for the CompiledMethod containing the instruction at "code"
  static bool is_safe(address code);
};

// Carries the result of computing a new inline-cache binding (entry point
// plus cached value) from the computation step (e.g.
// CompiledIC::compute_monomorphic_entry) to the actual patching of the
// call site (CompiledIC::set_to_monomorphic).
class CompiledICInfo : public StackObj {
 private:
  address _entry;              // entry point for call
  void*   _cached_value;       // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;        // Is the cached value a CompiledICHolder*
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call it to interpreter
  bool    _to_aot;             // Call it to aot code
  bool    _release_icholder;   // If set, the destructor still owns (and will delete) the CompiledICHolder*
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  // Transfers ownership of the CompiledICHolder* to the caller; after this
  // the destructor will no longer delete it.
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const   { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }
  bool to_aot() const         { return _to_aot; }

  // Call to compiled code; the cached value is a Klass* (see the state
  // diagram at the top of this file).
  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _to_aot = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  // Optimized (statically bound) call into the interpreter; the cached
  // value is the callee Method*.
  void set_interpreter_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _to_aot = false;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  // Optimized (statically bound) call into AOT-compiled code; the cached
  // value is the callee Method*.
  void set_aot_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = false;
    _to_aot = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  // Non-optimized call through a CompiledICHolder*. This object owns the
  // holder (and will delete it) until claim_cached_icholder() is called.
  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _to_aot = false;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _is_optimized(false), _to_interpreter(false), _to_aot(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};

// Abstracts the platform- and call-kind-specific details of the native call
// instruction, so that CompiledIC can manipulate different call forms
// uniformly.
class NativeCallWrapper: public ResourceObj {
public:
  virtual address destination() const = 0;
  virtual address instruction_address() const = 0;
  virtual address next_instruction_address() const = 0;
  virtual address return_address() const = 0;
  virtual address get_resolve_call_stub(bool is_optimized) const = 0;
  virtual void set_destination_mt_safe(address dest) = 0;
  virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) = 0;
  virtual void verify() const = 0;
  virtual void verify_resolve_call(address dest) const = 0;

  virtual bool is_call_to_interpreted(address dest) const = 0;
  virtual bool is_safe_for_patching() const = 0;

  virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0;

  virtual void *get_data(NativeInstruction* instruction) const = 0;
  virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0;
};

// Accessor/patcher object for a compiled inline-cache call site. Instances
// are transient ResourceObjs, created on demand via CompiledIC_at/_before.
class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCallWrapper* _call;
  NativeInstruction* _value;    // patchable value cell for this IC
  bool             _is_optimized; // an optimized virtual call (i.e., no compiled IC)
  CompiledMethod*  _method;

  CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The callsite might contain other stale
  // values of other kinds so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  // Raw access to the patchable value cell, via the platform wrapper
  void* get_data() const {
    return _call->get_data(_value);
  }

  void set_data(intptr_t data) {
    _call->set_data(_value, data);
  }

  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _call->return_address(); }

  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledICLocker
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // to manipulate the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  bool set_to_clean(bool in_use = true);
  bool set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
                                        bool is_optimized, bool static_bound, bool caller_is_nmethod,
                                        CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in the compiled code
//
// Transition diagram of a static call site is somewhat simpler than for an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to stub that set Method* reference
//
//

// Carries the computed entry point (and callee, when interpreted) for a
// static call site between compute_entry() and set().
class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)
  bool         _to_aot;         // call to aot method (otherwise compiled)

  friend class CompiledStaticCall;
  friend class CompiledDirectStaticCall;
  friend class CompiledPltStaticCall;
 public:
  address      entry() const  { return _entry;  }
  methodHandle callee() const { return _callee; }
};

// Abstract accessor/patcher for a compiled static call site.
class CompiledStaticCall : public ResourceObj {
 public:
  // Code
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
  static int to_interp_stub_size();
  static int to_trampoline_stub_size();
  static int reloc_to_interp_stub();
  static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
  static int to_aot_stub_size();
  static int reloc_to_aot_stub();

  // Compute entry point given a method
  static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);

 public:
  // Current destination of the call
  virtual address destination() const = 0;

  // Clean static call (will force resolving on next use)
  bool set_to_clean(bool in_use = true);

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
  // a OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  virtual bool is_call_to_interpreted() const = 0;

  virtual address instruction_address() const = 0;
 protected:
  virtual address resolve_call_stub() const = 0;
  virtual void set_destination_mt_safe(address dest) = 0;
#if INCLUDE_AOT
  virtual void set_to_far(const methodHandle& callee, address entry) = 0;
#endif
  virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
  virtual const char* name() const = 0;

  void set_to_compiled(address entry);
};

// CompiledStaticCall implementation backed directly by a NativeCall
// instruction. Instances are transient, created via before()/at().
class CompiledDirectStaticCall : public CompiledStaticCall {
 private:
  friend class CompiledIC;
  friend class DirectNativeCallWrapper;

  // Also used by CompiledIC
  void set_to_interpreted(const methodHandle& callee, address entry);
#if INCLUDE_AOT
  void set_to_far(const methodHandle& callee, address entry);
#endif
  address instruction_address() const { return _call->instruction_address(); }
  void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }

  NativeCall* _call;

  CompiledDirectStaticCall(NativeCall* call) : _call(call) {}

 public:
  static inline CompiledDirectStaticCall* before(address return_addr) {
    CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr));
    st->verify();
    return st;
  }

  static inline CompiledDirectStaticCall* at(address native_call) {
    CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call));
    st->verify();
    return st;
  }

  static inline CompiledDirectStaticCall* at(Relocation* call_site) {
    return at(call_site->addr());
  }

  // Delegation
  address destination() const { return _call->destination(); }

  // State
  virtual bool is_call_to_interpreted() const;
  bool is_call_to_far() const;

  // Stub support
  static address find_stub_for(address instruction, bool is_aot);
  address find_stub(bool is_aot);
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;

 protected:
  virtual address resolve_call_stub() const;
  virtual const char* name() const { return "CompiledDirectStaticCall"; }
};

#endif // SHARE_VM_CODE_COMPILEDIC_HPP