66 // The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
67 //
68 // The numbers in square brackets refer to the kind of transition:
69 // [1]: Initial fixup. Receiver is found from debug information
70 // [2]: Compilation of a method
71 // [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
72 // [4]: Inline cache miss. We go directly to megamorphic call.
73 //
74 // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
75 // transition is made to a stub.
76 //
77 class CompiledIC;
78
79 class CompiledICInfo {
80 friend class CompiledIC;
81 private:
82 address _entry; // entry point for call
83 Handle _cached_oop; // Value of cached_oop (either in stub or inline cache)
84 bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
85 bool _to_interpreter; // call is to the interpreter
86 public:
87 address entry() const { return _entry; }
88 Handle cached_oop() const { return _cached_oop; }
89 bool is_optimized() const { return _is_optimized; }
90 };
91
92 class CompiledIC: public ResourceObj {
93 friend class InlineCacheBuffer;
94 friend class ICStub;
95
96
97 private:
98 NativeCall* _ic_call; // the call instruction
99 oop* _oop_addr; // patchable oop cell for this IC
100 RelocIterator _oops; // iteration over any and all set-oop instructions
101 bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
102
103 CompiledIC(NativeCall* ic_call);
104 CompiledIC(Relocation* ic_reloc); // Must be of virtual_call_type/opt_virtual_call_type
105
106 // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
107 // to change an inline cache. These change the underlying inline cache directly. They *never* make
108 // changes to a transition stub.
109 void set_ic_destination(address entry_point);
110 void set_cached_oop(oop cache);
111
112 // Reads the location of the transition stub. This will fail with an assertion if no transition stub is
113 // associated with the inline cache.
114 address stub_address() const;
115 bool is_in_transition_state() const; // Use InlineCacheBuffer
116
117 public:
118 // conversion (machine PC to CompiledIC*)
119 friend CompiledIC* CompiledIC_before(address return_addr);
120 friend CompiledIC* CompiledIC_at(address call_site);
121 friend CompiledIC* CompiledIC_at(Relocation* call_site);
122
123 // Return the cached_oop/destination associated with this inline cache. If the cache currently points
124 // to a transition stub, it will read the values from the transition stub.
125 oop cached_oop() const;
126 address ic_destination() const;
127
128 bool is_optimized() const { return _is_optimized; }
129
130 // State
131 bool is_clean() const;
132 bool is_megamorphic() const;
133 bool is_call_to_compiled() const;
134 bool is_call_to_interpreted() const;
135
136 address end_of_call() { return _ic_call->return_address(); }
137
138 // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the CompiledIC_lock
139 // so you are guaranteed that no patching takes place. The same goes for verify.
140 //
141 // Note: We do not provide any direct access to the stub code, to prevent parts of the code
142 // from manipulating the inline cache in MT-unsafe ways.
143 //
144 // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
145 //
146 void set_to_clean(); // Can only be called during a safepoint operation
147 void set_to_monomorphic(const CompiledICInfo& info);
148 void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
149
150 static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
151 bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
152
153 // Location
154 address instruction_address() const { return _ic_call->instruction_address(); }
155
156 // Misc
157 void print() PRODUCT_RETURN;
158 void print_compiled_ic() PRODUCT_RETURN;
159 void verify() PRODUCT_RETURN;
160 };
161
162 inline CompiledIC* CompiledIC_before(address return_addr) {
163 CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
164 c_ic->verify();
165 return c_ic;
166 }
167
168 inline CompiledIC* CompiledIC_at(address call_site) {
169 CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
170 c_ic->verify();
171 return c_ic;
172 }
173
174 inline CompiledIC* CompiledIC_at(Relocation* call_site) {
175 CompiledIC* c_ic = new CompiledIC(call_site);
176 c_ic->verify();
177 return c_ic;
178 }
179
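// A rough sketch of how a monomorphic inline cache is typically filled in during
// call resolution (illustrative only; "method", "receiver_klass" and "call_site_addr"
// are assumed to be supplied by the resolution code, which is also responsible for
// holding CompiledIC_lock and for the TRAPS/GC protocol):
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(method, receiver_klass,
//                                         false /* is_optimized */, false /* static_bound */,
//                                         info, CHECK);
//   CompiledIC* ic = CompiledIC_at(call_site_addr);
//   if (ic->is_clean()) {
//     ic->set_to_monomorphic(info);  // may go through a transition stub if an MT-safe
//                                    // update is not possible directly
//   }
//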
184 // The transition diagram of a static call site is somewhat simpler than that of an inline cache:
185 //
186 //
187 //          -----<----- Clean ----->-----
188 //         /                             \
189 //        /                               \
190 //   compiled code <------------> interpreted code
191 //
192 //   Clean:            Calls directly to runtime method for fixup
193 //   Compiled code:    Calls directly to compiled code
194 //   Interpreted code: Calls to a stub that sets the methodOop reference
195 //
196 //
197 class CompiledStaticCall;
198
199 class StaticCallInfo {
200 private:
201 address _entry; // Entrypoint
202 methodHandle _callee; // Callee (used when calling interpreter)
203 bool _to_interpreter; // call to interpreted method (otherwise compiled)
204
205 friend class CompiledStaticCall;
206 public:
207 address entry() const { return _entry; }
208 methodHandle callee() const { return _callee; }
209 };
210
211
212 class CompiledStaticCall: public NativeCall {
213 friend class CompiledIC;
214
215 // Also used by CompiledIC
216 void set_to_interpreted(methodHandle callee, address entry);
217 bool is_optimized_virtual();
218
219 public:
220 friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
221 friend CompiledStaticCall* compiledStaticCall_at(address native_call);
222 friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
223
224 // State
225 bool is_clean() const;
226 bool is_call_to_compiled() const;
227 bool is_call_to_interpreted() const;
228
229 // Clean static call (will force resolving on next use)
230 void set_to_clean();
231
232 // Set state. The entry must be the same as that computed by compute_entry.
233 // Computation and setting are split up, since the actions happen separately
234 // during an OptoRuntime::resolve_xxx (a sketch follows the compiledStaticCall_at helpers below).
235 void set(const StaticCallInfo& info);
236
237 // Compute entry point given a method
238 static void compute_entry(methodHandle m, StaticCallInfo& info);
239
240 // Stub support
241 address find_stub();
242 static void set_stub_to_clean(static_stub_Relocation* static_stub);
243
244 // Misc.
245 void print() PRODUCT_RETURN;
246 void verify() PRODUCT_RETURN;
247 };
248
249
250 inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
251 CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
252 st->verify();
253 return st;
254 }
255
256 inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
257 CompiledStaticCall* st = (CompiledStaticCall*)native_call;
258 st->verify();
259 return st;
260 }
261
262 inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
263 return compiledStaticCall_at(call_site->addr());
264 }
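
// A rough sketch of the compute/set split mentioned in CompiledStaticCall above
// (illustrative only; "callee" and "call_site_addr" are assumed to be supplied by
// the call resolution code, which performs these steps under the appropriate locks):
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee, info);          // compute, no patching yet
//   CompiledStaticCall* ssc = compiledStaticCall_at(call_site_addr);
//   ssc->set(info);                                            // patch the call site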
265
266 #endif // SHARE_VM_CODE_COMPILEDIC_HPP
|
66 // The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
67 //
68 // The numbers in square brackets refer to the kind of transition:
69 // [1]: Initial fixup. Receiver is found from debug information
70 // [2]: Compilation of a method
71 // [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
72 // [4]: Inline cache miss. We go directly to megamorphic call.
73 //
74 // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
75 // transition is made to a stub.
76 //
77 class CompiledIC;
78
79 class CompiledICInfo {
80 friend class CompiledIC;
81 private:
82 address _entry; // entry point for call
83 Handle _cached_oop; // Value of cached_oop (either in stub or inline cache)
84 bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
85 bool _to_interpreter; // call is to the interpreter
86 bool _is_profiled;
87 public:
88 address entry() const { return _entry; }
89 Handle cached_oop() const { return _cached_oop; }
90 bool is_optimized() const { return _is_optimized; }
91 bool is_profiled() const { return _is_profiled; }
92 };
93
94 // Support for c1 profile-based inlining. Some call sites are
95 // instrumented to gather profiling data used to drive further
96 // inlining through recompilation. The instrumentation code consists
97 // of incrementing a per-call-site counter stored in the MDO, testing
98 // whether it crosses a threshold, branching to the runtime if it does,
99 // and jumping to the callee otherwise.
100 //
101 // The compiler identifies the candidate call sites and generates a
102 // stub similar to the static call stub in the nmethod's stub
103 // area. The profile call stub performs the following steps:
104 // 1- load mdo pointer in register
105 // 2- increment counter for call site
106 // 3- branch to runtime if counter crosses threshold
107 // 4- jump to callee
108 //
109 // On call site resolution, for a call to a compiled method, the jump
110 // (4 above) is patched with the resolved call site info (to continue
111 // to the callee's code or a transition stub), then the call site is
112 // patched to point to the profile call stub. Profiling can later be
113 // fully disabled for the call site (if the call site is polymorphic or
114 // if the compilation policy finds it better not to profile the call
115 // site anymore) by re-resolving the call.
116 //
117 class CompiledProfile {
118
119 private:
120
121 friend class CompiledStaticCall;
122 static bool is_profiled(NativeCall* call);
123 static address profile_target(NativeCall* call);
124
125 protected:
126
127 address profile_target() const;
128 address find_profile_stub() const;
129 void set_up_profiling(address entry_point);
130
131 virtual NativeCall* call_instr() const = 0;
132
133 public:
134 bool is_profiled() const; // Use InlineCacheBuffer
135
136 static address find_profile_stub(NativeCall* call);
137 static void set_up_profiling(NativeCall* call, address stub, address entry_point);
138 static bool is_call_to_stub(NativeCall* call, address stub);
139 };
140
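// A rough sketch of the control flow the profile call stub implements (illustrative
// pseudo-code only; the real stub is emitted as platform-specific assembly, and the
// names "counter_at", "call_site_bci", "ProfileThreshold", "runtime" and "callee_entry"
// are placeholders, not actual identifiers):
//
//   // 1- load mdo pointer in register
//   methodDataOop mdo = <this nmethod's MDO, known at compile time>;
//   // 2- increment counter for call site
//   int count = ++counter_at(mdo, call_site_bci);
//   // 3- branch to runtime if counter crosses threshold
//   if (count >= ProfileThreshold) goto runtime;   // lets the policy consider recompiling
//   // 4- jump to callee
//   goto callee_entry;
//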
141 class CompiledIC: public ResourceObj, CompiledProfile {
142 friend class InlineCacheBuffer;
143 friend class ICStub;
144
145
146 private:
147 NativeCall* _ic_call; // the call instruction
148 oop* _oop_addr; // patchable oop cell for this IC
149 RelocIterator _oops; // iteration over any and all set-oop instructions
150 bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
151
152 CompiledIC(NativeCall* ic_call);
153 CompiledIC(Relocation* ic_reloc); // Must be of virtual_call_type/opt_virtual_call_type
154
155 // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
156 // to change an inline cache. These change the underlying inline cache directly. They *never* make
157 // changes to a transition stub.
158 public:
159 void set_ic_destination(address entry_point, bool set_profiled);
160 private:
161 void set_cached_oop(oop cache);
162
163 // Reads the location of the transition stub. This will fail with an assertion if no transition stub is
164 // associated with the inline cache.
165 address stub_address() const;
166
167 protected:
168 NativeCall* call_instr() const { return _ic_call; }
169
170 public:
171 bool is_in_transition_state() const; // Use InlineCacheBuffer
172
173 public:
174 // conversion (machine PC to CompiledIC*)
175 friend CompiledIC* CompiledIC_before(address return_addr);
176 friend CompiledIC* CompiledIC_at(address call_site);
177 friend CompiledIC* CompiledIC_at(Relocation* call_site);
178
179 // Return the cached_oop/destination associated with this inline cache. If the cache currently points
180 // to a transition stub, it will read the values from the transition stub.
181 oop cached_oop() const;
182 address ic_destination() const;
183
184 bool is_optimized() const { return _is_optimized; }
185
186 // State
187 bool is_clean() const;
188 bool is_megamorphic() const;
189 bool is_call_to_compiled() const;
190 bool is_call_to_interpreted() const;
191
192 address end_of_call() { return _ic_call->return_address(); }
193
194 // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the CompiledIC_lock
195 // so you are guaranteed that no patching takes place. The same goes for verify.
196 //
197 // Note: We do not provide any direct access to the stub code, to prevent parts of the code
198 // from manipulating the inline cache in MT-unsafe ways.
199 //
200 // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
201 //
202 void set_to_clean(); // Can only be called during a safepoint operation
203 void set_to_monomorphic(const CompiledICInfo& info);
204 void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
205
206 static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
207 bool is_optimized, bool static_bound, CompiledICInfo& info, bool is_profiled, TRAPS);
208
209 // Location
210 address instruction_address() const { return _ic_call->instruction_address(); }
211
212 // Misc
213 void print() PRODUCT_RETURN;
214 void print_compiled_ic() PRODUCT_RETURN;
215 void verify() PRODUCT_RETURN;
216
217 void drop_profiling();
218 };
219
220 inline CompiledIC* CompiledIC_before(address return_addr) {
221 CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
222 c_ic->verify();
223 return c_ic;
224 }
225
226 inline CompiledIC* CompiledIC_at(address call_site) {
227 CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
228 c_ic->verify();
229 return c_ic;
230 }
231
232 inline CompiledIC* CompiledIC_at(Relocation* call_site) {
233 CompiledIC* c_ic = new CompiledIC(call_site);
234 c_ic->verify();
235 return c_ic;
236 }
237
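// A rough sketch of inspecting a call site through this interface (illustrative only;
// assumes the caller holds CompiledIC_lock and that "call_site_addr" points at a
// virtual_call_type/opt_virtual_call_type relocation):
//
//   CompiledIC* ic = CompiledIC_at(call_site_addr);
//   if (ic->is_clean()) {
//     // unresolved; the first invocation will resolve the call
//   } else if (ic->is_profiled()) {
//     // the call currently runs through a profile call stub; the compilation
//     // policy may later call ic->drop_profiling() to stop the counting
//   } else if (ic->is_megamorphic()) {
//     // receiver-polymorphic site; dispatch goes through a shared stub
//   }
//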
242 // The transition diagram of a static call site is somewhat simpler than that of an inline cache:
243 //
244 //
245 //          -----<----- Clean ----->-----
246 //         /                             \
247 //        /                               \
248 //   compiled code <------------> interpreted code
249 //
250 //   Clean:            Calls directly to runtime method for fixup
251 //   Compiled code:    Calls directly to compiled code
252 //   Interpreted code: Calls to a stub that sets the methodOop reference
253 //
254 //
255 class CompiledStaticCall;
256
257 class StaticCallInfo {
258 private:
259 address _entry; // Entrypoint
260 methodHandle _callee; // Callee (used when calling interpreter)
261 bool _to_interpreter; // call to interpreted method (otherwise compiled)
262 bool _is_profiled;
263
264 friend class CompiledStaticCall;
265 public:
266 address entry() const { return _entry; }
267 methodHandle callee() const { return _callee; }
268 bool is_profiled() const { return _is_profiled; }
269 };
270
271
272 class CompiledStaticCall: public NativeCall {
273 friend class CompiledIC;
274
275 // Also used by CompiledIC
276 void set_to_interpreted(methodHandle callee, address entry);
277 bool is_optimized_virtual();
278
279 private:
280
281 NativeCall* call_instr() const { return (NativeCall*)this; }
282
283 public:
284 friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
285 friend CompiledStaticCall* compiledStaticCall_at(address native_call);
286 friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
287
288 // State
289 bool is_clean() const;
290 bool is_call_to_compiled() const;
291 bool is_call_to_interpreted() const;
292
293 // Clean static call (will force resolving on next use)
294 void set_to_clean();
295
296 // Set state. The entry must be the same as that computed by compute_entry.
297 // Computation and setting are split up, since the actions happen separately
298 // during an OptoRuntime::resolve_xxx (a sketch follows the compiledStaticCall_at helpers below).
299 void set(const StaticCallInfo& info);
300
301 // Compute entry point given a method
302 static void compute_entry(methodHandle m, StaticCallInfo& info, bool is_profiled = false);
303
304 // Stub support
305 address find_stub();
306 static void set_stub_to_clean(static_stub_Relocation* static_stub);
307
308 // Misc.
309 void print() PRODUCT_RETURN;
310 void verify() PRODUCT_RETURN;
311
312 void drop_profiling();
313
314 address destination() const;
315 };
316
317
318 inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
319 CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
320 st->verify();
321 return st;
322 }
323
324 inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
325 CompiledStaticCall* st = (CompiledStaticCall*)native_call;
326 st->verify();
327 return st;
328 }
329
330 inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
331 return compiledStaticCall_at(call_site->addr());
332 }
333
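// A rough sketch of the compute/set split with profiling enabled (illustrative only;
// "callee" and "call_site_addr" are assumed to be supplied by the call resolution code,
// and whether to profile a given site is decided by the compilation policy):
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee, info, true /* is_profiled */);
//   CompiledStaticCall* ssc = compiledStaticCall_at(call_site_addr);
//   ssc->set(info);   // patches the call site; info carries the profiling flag
//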
334 #endif // SHARE_VM_CODE_COMPILEDIC_HPP
|