/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean -->---  [1]
//            /       (null)      \
//           /                     \      /-<-\
//          /          [2]          \    /     \
//      Interpreted  ---------> Monomorphic     | [3]
//  (CompiledICHolder*)            (Klass*)     |
//          \                        /   \     /
//       [4] \                      / [4] \->-/
//            \->-  Megamorphic -<-/
//              (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from the debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
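// For orientation, a hedged sketch of how a call site is typically wrapped and
// inspected; `nm` and `call_site_addr` are placeholder names for illustration:
//
//   CompiledIC* ic = CompiledIC_at(nm, call_site_addr);  // wrap the call instruction
//   if (ic->is_clean()) {
//     // Clean state: the next invocation triggers resolution
//   } else if (ic->is_megamorphic()) {
//     // Megamorphic state: the call dispatches through a vtable/itable stub
//   }
//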
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;              // entry point for call
  void*   _cached_value;       // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;        // Is the cached value a CompiledICHolder*
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call is to the interpreter
  bool    _release_icholder;
 public:
  address entry() const        { return _entry; }
  Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder*    claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool    is_optimized() const { return _is_optimized; }
  bool    to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry      = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry      = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry      = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
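
// A hedged usage sketch of how the resolution code fills in and consumes a
// CompiledICInfo; `callee_method`, `receiver_klass` and `ic` are placeholders:
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         ic->is_optimized(), false, info, CHECK);
//   ic->set_to_monomorphic(info);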

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*   _ic_call;       // the call instruction
  NativeMovConstReg* _value;    // patchable value cell for this IC
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed.  The call site might contain other stale
  // values of other kinds, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return  _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the CompiledIC_lock,
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
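  // A hedged sketch of the locking discipline, assuming the conventional
  // MutexLocker guard on CompiledIC_lock; `ic` is a placeholder:
  //
  //   MutexLocker ml(CompiledIC_lock);
  //   if (!ic->is_call_to_interpreted()) {
  //     ic->set_to_clean();  // safe: no concurrent patching while the lock is held
  //   }
  //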
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than for an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to a stub that sets the Method* reference
//
//
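// A hedged sketch of locating and inspecting a static call site; the variable
// names are placeholders for illustration:
//
//   CompiledStaticCall* csc = compiledStaticCall_before(return_addr);
//   if (csc->is_clean()) {
//     // first use: the call still points at the runtime fixup routine
//   }
//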
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling the interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const    { return _entry;  }
  methodHandle callee() const   { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
#if defined(AARCH64) && !defined(ZERO)
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark);
#else
  static address emit_to_interp_stub(CodeBuffer &cbuf);
#endif
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);
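
  // A hedged sketch of the split compute/set flow described above;
  // `callee_method` and `csc` are placeholders:
  //
  //   StaticCallInfo static_call_info;
  //   CompiledStaticCall::compute_entry(callee_method, static_call_info);
  //   csc->set(static_call_info);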

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP