/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//            [1] --<--  Clean  -->--- [1]
//           /           (null)        \
//          /                            \       /-<-\
//         /              [2]             \      |     |
//   Interpreted  ----------->  Monomorphic      | [3] |
//   (CompiledICHolder*)          (Klass*)        \->-/
//        \                        /
//     [4] \                      / [4]
//          \->-- Megamorphic --<-/
//                 (Method*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an
// MT-unsafe transition is required; the inline cache then points to the stub until the
// transition completes.
//
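// Illustrative usage sketch (not part of this header): a caller cleaning all
// inline caches of an nmethod walks its relocations and patches each IC while
// holding the CompiledIC_lock:
//
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type) {
//       CompiledIC* ic = CompiledIC_at(&iter);
//       ic->set_to_clean();
//     }
//   }
//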
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;            // entry point for call
  void*   _cached_value;     // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;      // Is the cached value a CompiledICHolder*
  bool    _is_optimized;     // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;   // Call goes to the interpreter
  bool    _release_icholder; // Release the CompiledICHolder* in the destructor if it was never claimed
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }
  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
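
// Illustrative sketch (hypothetical caller, modeled on the runtime's call
// resolution): a CompiledICInfo is filled in first and then applied to the
// inline cache in a separate, MT-safe step:
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         ic->is_optimized(), static_bound,
//                                         info, CHECK);
//   if (ic->is_clean()) {
//     ic->set_to_monomorphic(info);
//   }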

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCall*        _ic_call;      // the call instruction
  NativeMovConstReg* _value;        // patchable value cell for this IC
  bool               _is_optimized; // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition
  // stub is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The call site might contain stale values
  // of other kinds, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call the is_xxx queries when
  // holding the CompiledIC_lock, so you are guaranteed that no patching takes place. The same
  // goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // set_to_megamorphic takes a TRAPS argument, since it can cause a GC if the inline-cache
  // buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
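  //
  // Illustrative sketch (hypothetical caller): an inline-cache miss handler can fall
  // back to the clean state when the code cache has no room for the megamorphic stub:
  //
  //   if (!ic->set_to_megamorphic(&call_info, bytecode, CHECK_false)) {
  //     ic->set_to_clean();
  //   }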

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than that of an inline cache:
//
//
//      -----<----- Clean ----->-----
//     /                             \
//    /                               \
//  compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to a stub that sets the Method* reference
//
//
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const  { return _entry; }
  methodHandle callee() const { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);
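  //
  // Illustrative sketch (hypothetical caller, mirroring the split described above):
  //
  //   StaticCallInfo static_call_info;
  //   CompiledStaticCall::compute_entry(callee_method, static_call_info);
  //   if (ssc->is_clean()) {
  //     ssc->set(static_call_info);
  //   }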

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP
--- EOF ---