1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_CODE_COMPILEDIC_HPP
  26 #define SHARE_VM_CODE_COMPILEDIC_HPP
  27 
  28 #include "interpreter/linkResolver.hpp"
  29 #include "oops/compiledICHolderKlass.hpp"
  30 #include "oops/compiledICHolderOop.hpp"
  31 #include "oops/klassOop.hpp"
  32 #ifdef TARGET_ARCH_x86
  33 # include "nativeInst_x86.hpp"
  34 #endif
  35 #ifdef TARGET_ARCH_sparc
  36 # include "nativeInst_sparc.hpp"
  37 #endif
  38 #ifdef TARGET_ARCH_zero
  39 # include "nativeInst_zero.hpp"
  40 #endif
  41 
  42 //-----------------------------------------------------------------------------
  43 // The CompiledIC represents a compiled inline cache.
  44 //
  45 // In order to make patching of the inline cache MT-safe, we only allow the following
  46 // transitions (when not at a safepoint):
  47 //
  48 //
  49 //         [1] --<--  Clean -->---  [1]
  50 //            /       (null)      \
  51 //           /                     \      /-<-\
  52 //          /          [2]          \    /     \
  53 //      Interpreted  ---------> Monomorphic     | [3]
  54 //  (compiledICHolderOop)        (klassOop)     |
  55 //          \                        /   \     /
  56 //       [4] \                      / [4] \->-/
  57 //            \->-  Megamorphic -<-/
  58 //                  (methodOop)
  59 //
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
  67 //
  68 // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
  69 // transition is made to a stub.
  70 //
  71 class CompiledIC;
  72 
// CompiledICInfo carries the precomputed target state for a monomorphic
// inline-cache transition: it is filled in by
// CompiledIC::compute_monomorphic_entry and consumed by
// CompiledIC::set_to_monomorphic.
class CompiledICInfo {
  friend class CompiledIC;
 private:
  address _entry;              // entry point for call
  Handle  _cached_oop;         // Value of cached_oop (either in stub or inline cache)
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // the call should be dispatched to the interpreter
 public:
  address entry() const        { return _entry; }
  Handle  cached_oop() const   { return _cached_oop; }
  bool    is_optimized() const { return _is_optimized; }
};
  85 
class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*   _ic_call;       // the call instruction
  oop*          _oop_addr;      // patchable oop cell for this IC
  RelocIterator _oops;          // iteration over any and all set-oop instructions
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(NativeCall* ic_call);
  CompiledIC(Relocation* ic_reloc);    // Must be of virtual_call_type/opt_virtual_call_type

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
  // changes to a transition stub.
  void set_ic_destination(address entry_point);
  void set_cached_oop(oop cache);

  // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(address return_addr);
  friend CompiledIC* CompiledIC_at(address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);

  // Return the cached_oop/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  oop  cached_oop() const;
  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Address of the first instruction after the call (i.e., the return address).
  address end_of_call() { return  _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // to manipulate the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean();  // Can only be called during a safepoint operation
  void set_to_monomorphic(const CompiledICInfo& info);
  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  // Computes the monomorphic target (entry + cached oop) for the given
  // receiver klass/method pair, storing the result in info.
  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};
 155 
 156 inline CompiledIC* CompiledIC_before(address return_addr) {
 157   CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
 158   c_ic->verify();
 159   return c_ic;
 160 }
 161 
 162 inline CompiledIC* CompiledIC_at(address call_site) {
 163   CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
 164   c_ic->verify();
 165   return c_ic;
 166 }
 167 
 168 inline CompiledIC* CompiledIC_at(Relocation* call_site) {
 169   CompiledIC* c_ic = new CompiledIC(call_site);
 170   c_ic->verify();
 171   return c_ic;
 172 }
 173 
 174 
 175 //-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than for an inline cache:
 179 //
 180 //
 181 //           -----<----- Clean ----->-----
 182 //          /                             \
 183 //         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls a stub that sets the methodOop reference
 189 //
 190 //
 191 class CompiledStaticCall;
 192 
// StaticCallInfo carries the precomputed target state for a static call:
// it is filled in by CompiledStaticCall::compute_entry and consumed by
// CompiledStaticCall::set.
class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const    { return _entry;  }
  methodHandle callee() const   { return _callee; }
};
 204 
 205 
class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};
 242 
 243 
 244 inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
 245   CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
 246   st->verify();
 247   return st;
 248 }
 249 
 250 inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
 251   CompiledStaticCall* st = (CompiledStaticCall*)native_call;
 252   st->verify();
 253   return st;
 254 }
 255 
 256 inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
 257   return compiledStaticCall_at(call_site->addr());
 258 }
 259 
 260 #endif // SHARE_VM_CODE_COMPILEDIC_HPP