--- old/src/share/vm/opto/callGenerator.hpp
+++ new/src/share/vm/opto/callGenerator.hpp
1 1 /*
2 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
26 26 #define SHARE_VM_OPTO_CALLGENERATOR_HPP
27 27
28 28 #include "opto/callnode.hpp"
29 29 #include "opto/compile.hpp"
30 30 #include "opto/type.hpp"
31 31 #include "runtime/deoptimization.hpp"
32 32
33 33 //---------------------------CallGenerator-------------------------------------
34 34 // The subclasses of this class handle generation of ideal nodes for
35 35 // call sites and method entry points.
36 36
37 37 class CallGenerator : public ResourceObj {
38 38 public:
39 39 enum {
40 40 xxxunusedxxx
41 41 };
42 42
43 43 private:
44 44 ciMethod* _method; // The method being called.
45 45
46 46 protected:
47 47 CallGenerator(ciMethod* method);
48 48
49 49 public:
50 50 // Accessors
51 51 ciMethod* method() const { return _method; }
52 52
53 53 // is_inline: At least some code implementing the method is copied here.
54 54 virtual bool is_inline() const { return false; }
55 55 // is_intrinsic: There's a method-specific way of generating the inline code.
56 56 virtual bool is_intrinsic() const { return false; }
57 57 // is_parse: Bytecodes implementing the specific method are copied here.
58 58 virtual bool is_parse() const { return false; }
59 59 // is_virtual: The call uses the receiver type to select or check the method.
60 60 virtual bool is_virtual() const { return false; }
61 61 // is_deferred: The decision whether to inline or not is deferred.
62 62 virtual bool is_deferred() const { return false; }
63 63 // is_predicted: Uses an explicit check against a predicted type.
64 64 virtual bool is_predicted() const { return false; }
65 65 // is_trap: Does not return to the caller. (E.g., uncommon trap.)
66 66 virtual bool is_trap() const { return false; }
67 67
68 68 // is_late_inline: supports conversion of call into an inline
69 69 virtual bool is_late_inline() const { return false; }
70 70 // Replace the call with an inline version of the code
71 71 virtual void do_late_inline() { ShouldNotReachHere(); }
72 72
73 73 virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
74 74
75 75 // Note: It is possible for a CG to be both inline and virtual.
76 76 // (The hashCode intrinsic does a vtable check and an inlined fast path.)
77 77
78 78 // Utilities:
79 79 const TypeFunc* tf() const;
80 80
81 81 // The given jvms has state and arguments for a call to my method.
82 82 // Edges after jvms->argoff() carry all (pre-popped) argument values.
83 83 //
84 84 // Update the map with state and return values (if any) and return it.
85 85 // The return values (0, 1, or 2) must be pushed on the map's stack,
86 86 // and the sp of the jvms incremented accordingly.
87 87 //
88 88 // The jvms is returned on success. Alternatively, a copy of the
89 89 // given jvms, suitably updated, may be returned, in which case the
90 90 // caller should discard the original jvms.
91 91 //
92 92 // The non-Parm edges of the returned map will contain updated global state,
93 93 // and one or two edges before jvms->sp() will carry any return values.
94 94 // Other map edges may contain locals or monitors, and should not
95 95 // be changed in meaning.
96 96 //
97 97 // If the call traps, the returned map must have a control edge of top.
98 98 // If the call can throw, the returned map must report has_exceptions().
99 99 //
100 100 // If the result is NULL, it means that this CallGenerator was unable
101 101 // to handle the given call, and another CallGenerator should be consulted.
102 102 virtual JVMState* generate(JVMState* jvms) = 0;
103 103
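The generate() contract above is easiest to see in miniature. The following stand-alone sketch models what a successful call must do: return the (possibly copied) state with its return value pushed and sp bumped to match, or NULL to signal that another CallGenerator should be consulted. Every type and name below is an invented stand-in for illustration, not a real HotSpot class.

#include <cassert>
#include <vector>

// Toy stand-in for JVMState: just an expression stack whose size is "sp".
struct ToyState {
  std::vector<int> stack;
  int sp() const { return (int)stack.size(); }
};

// Toy generator for a method with one return value: push it and return
// the updated state.  Returning NULL would mean "consult another CG".
struct ToyGenerator {
  ToyState* generate(ToyState* jvms) {
    jvms->stack.push_back(42);   // push the single return value...
    return jvms;                 // ...so sp has grown by exactly one
  }
};

int main() {
  ToyState state;
  ToyGenerator cg;
  int sp_before = state.sp();
  ToyState* result = cg.generate(&state);
  assert(result != nullptr && result->sp() == sp_before + 1);
  return 0;
}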
104 104 // How to generate a call site that is inlined:
105 105 static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
106 106 // How to generate code for an on-stack replacement handler.
107 107 static CallGenerator* for_osr(ciMethod* m, int osr_bci);
108 108
109 109 // How to generate vanilla out-of-line call sites:
110 110 static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false); // static, special
111 - static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
112 111 static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
112 + static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
113 +
114 + static CallGenerator* for_method_handle_call(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
115 + static CallGenerator* for_invokedynamic_call( JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
113 116
114 117 static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
115 118 static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
116 119
117 120 // How to generate a direct call that can later be replaced with an inline version
118 121 static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
119 122
120 123 // How to make a call but defer the decision whether to inline or not.
121 124 static CallGenerator* for_warm_call(WarmCallInfo* ci,
122 125 CallGenerator* if_cold,
123 126 CallGenerator* if_hot);
124 127
125 128 // How to make a call that optimistically assumes a receiver type:
126 129 static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
127 130 CallGenerator* if_missed,
128 131 CallGenerator* if_hit,
129 132 float hit_prob);
130 133
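To illustrate the control shape that a predicted call produces, the toy sketch below guards an inlined fast path with a receiver-type check and falls back to a full dispatch on a miss; hit_prob is the expected fraction of executions taking the guarded branch. The names and the fallback policy here are invented for the example, not HotSpot code.

#include <cstdio>

enum ToyKlass { PREDICTED_KLASS, OTHER_KLASS };

static int inlined_fast_path(int x) { return x * 2; }   // the if_hit path
static int out_of_line_call(int x)  { return x * 2; }   // the if_missed path

// Guard on the profiled receiver type: hit_prob of executions are
// expected to take the inlined branch.
int predicted_dispatch(ToyKlass receiver, int x) {
  if (receiver == PREDICTED_KLASS)
    return inlined_fast_path(x);
  return out_of_line_call(x);   // rare miss: full dispatch (or a trap)
}

int main() {
  printf("%d %d\n", predicted_dispatch(PREDICTED_KLASS, 21),
                    predicted_dispatch(OTHER_KLASS, 21));
  return 0;
}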
131 134 // How to make a call that optimistically assumes a MethodHandle target:
132 135 static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
133 136 CallGenerator* if_missed,
134 137 CallGenerator* if_hit,
135 138 float hit_prob);
136 139
137 140 // How to make a call that gives up and goes back to the interpreter:
138 141 static CallGenerator* for_uncommon_trap(ciMethod* m,
139 142 Deoptimization::DeoptReason reason,
140 143 Deoptimization::DeoptAction action);
141 144
142 145 // Registry for intrinsics:
143 146 static CallGenerator* for_intrinsic(ciMethod* m);
144 147 static void register_intrinsic(ciMethod* m, CallGenerator* cg);
145 148 };
146 149
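The trailing comments on the out-of-line factories above suggest the mapping a front end applies per invoke bytecode. The sketch below merely restates that mapping; the real selection logic lives in the compiler's call-site parsing code, not in this header, and also weighs intrinsics, profiles, and inlining heuristics.

#include <cstdio>

enum ToyBytecode { INVOKESTATIC, INVOKESPECIAL, INVOKEVIRTUAL,
                   INVOKEINTERFACE, INVOKEDYNAMIC };

// Restates the factory comments above: which out-of-line factory matches
// which invoke bytecode.  Illustrative only.
const char* factory_for(ToyBytecode bc) {
  switch (bc) {
    case INVOKESTATIC:
    case INVOKESPECIAL:   return "for_direct_call";    // static, special
    case INVOKEVIRTUAL:
    case INVOKEINTERFACE: return "for_virtual_call";   // virtual, interface
    case INVOKEDYNAMIC:   return "for_dynamic_call";   // invokedynamic
  }
  return "unknown";
}

int main() {
  printf("%s\n", factory_for(INVOKEINTERFACE));   // prints: for_virtual_call
  return 0;
}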
147 150 class InlineCallGenerator : public CallGenerator {
148 151 virtual bool is_inline() const { return true; }
149 152
150 153 protected:
151 154 InlineCallGenerator(ciMethod* method) : CallGenerator(method) { }
152 155 };
153 156
154 157
155 158 //---------------------------WarmCallInfo--------------------------------------
156 159 // A struct to collect information about a given call site.
157 160 // Helps sort call sites into "hot", "medium", and "cold".
158 161 // Participates in the queueing of "medium" call sites for possible inlining.
159 162 class WarmCallInfo : public ResourceObj {
160 163 private:
161 164
162 165 CallNode* _call; // The CallNode which may be inlined.
163 166 CallGenerator* _hot_cg;// CG for expanding the call node
164 167
165 168 // These are the metrics we use to evaluate call sites:
166 169
167 170 float _count; // How often do we expect to reach this site?
168 171 float _profit; // How much time do we expect to save by inlining?
169 172 float _work; // How long do we expect the average call to take?
170 173 float _size; // How big do we expect the inlined code to be?
171 174
172 175 float _heat; // Combined score inducing total order on call sites.
173 176 WarmCallInfo* _next; // Next cooler call info in pending queue.
174 177
175 178 // Count is the number of times this call site is expected to be executed.
176 179 // Large count is favorable for inlining, because the extra compilation
177 180 // work will be amortized more completely.
178 181
179 182 // Profit is a rough measure of the amount of time we expect to save
180 183 // per execution of this site if we inline it. (1.0 == call overhead)
181 184 // Large profit favors inlining. Negative profit disables inlining.
182 185
183 186 // Work is a rough measure of the amount of time a typical out-of-line
184 187 // call from this site is expected to take. (1.0 == call, no-op, return)
185 188 // Small work is somewhat favorable for inlining, since methods with
186 189 // short "hot" traces are more likely to inline smoothly.
187 190
188 191 // Size is the number of graph nodes we expect this method to produce,
189 192 // not counting the inlining of any further warm calls it may include.
190 193 // Small size favors inlining, since small methods are more likely to
191 194 // inline smoothly. The size is estimated by examining the native code
192 195 // if available. The method bytecodes are also examined, assuming
193 196 // empirically observed node counts for each kind of bytecode.
194 197
195 198 // Heat is the combined "goodness" of a site's inlining. If we were
196 199 // omniscient, it would be the difference of two sums of future execution
197 200 // times of code emitted for this site (amortized across multiple sites if
198 201 // sharing applies). The two sums are for versions of this call site with
199 202 // and without inlining.
200 203
201 204 // We approximate this mythical quantity by playing with averages,
202 205 // rough estimates, and assumptions that history repeats itself.
203 206 // The basic formula count * profit is heuristically adjusted
204 207 // by looking at the expected compilation and execution times of
205 208 // the inlined call.
206 209
207 210 // Note: Some of these metrics may not be present in the final product,
208 211 // but exist in development builds to experiment with inline policy tuning.
209 212
210 213 // This heuristic framework does not model well the very significant
211 214 // effects of multiple-level inlining. It is possible to see no immediate
212 215 // profit from inlining X->Y, but to get great profit from a subsequent
213 216 // inlining X->Y->Z.
214 217
215 218 // This framework does not take well into account the problem of N**2 code
216 219 // size in a clique of mutually inlinable methods.
217 220
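A toy version of the scoring just described, with made-up adjustment factors: the base score is count * profit, damped by size and work so that large or slow bodies cool the site. The real compute_heat() differs; this only shows how the four metrics combine.

#include <cstdio>

// Toy heat: base = count * profit, then damp by size and work.
// The 1/(1+x) damping factors are invented for illustration.
float toy_heat(float count, float profit, float work, float size) {
  float base = count * profit;             // "the basic formula" above
  base *= 1.0f / (1.0f + size / 100.0f);   // bigger inlined body => cooler
  base *= 1.0f / (1.0f + work / 10.0f);    // longer average call => cooler
  return base;
}

int main() {
  // A site reached 10,000 times, saving ~1.2 call overheads per execution:
  printf("heat = %.1f\n", toy_heat(10000.0f, 1.2f, 2.0f, 35.0f));
  return 0;
}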
218 221 WarmCallInfo* next() const { return _next; }
219 222 void set_next(WarmCallInfo* n) { _next = n; }
220 223
221 224 static WarmCallInfo _always_hot;
222 225 static WarmCallInfo _always_cold;
223 226
224 227 // Constructor initialization of always_hot and always_cold
225 228 WarmCallInfo(float c, float p, float w, float s) {
226 229 _call = NULL;
227 230 _hot_cg = NULL;
228 231 _next = NULL;
229 232 _count = c;
230 233 _profit = p;
231 234 _work = w;
232 235 _size = s;
233 236 _heat = 0;
234 237 }
235 238
236 239 public:
237 240 // Because WarmCallInfo objects live over the entire lifetime of the
238 241 // Compile object, they are allocated into the comp_arena, which
239 242 // does not get resource marked or reset during the compile process.
240 243 void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
241 244 void operator delete( void * ) { } // fast deallocation
242 245
243 246 static WarmCallInfo* always_hot();
244 247 static WarmCallInfo* always_cold();
245 248
246 249 WarmCallInfo() {
247 250 _call = NULL;
248 251 _hot_cg = NULL;
249 252 _next = NULL;
250 253 _count = _profit = _work = _size = _heat = 0;
251 254 }
252 255
253 256 CallNode* call() const { return _call; }
254 257 float count() const { return _count; }
255 258 float size() const { return _size; }
256 259 float work() const { return _work; }
257 260 float profit() const { return _profit; }
258 261 float heat() const { return _heat; }
259 262
260 263 void set_count(float x) { _count = x; }
261 264 void set_size(float x) { _size = x; }
262 265 void set_work(float x) { _work = x; }
263 266 void set_profit(float x) { _profit = x; }
264 267 void set_heat(float x) { _heat = x; }
265 268
266 269 // Load initial heuristics from profiles, etc.
267 270 // The heuristics can be tweaked further by the caller.
268 271 void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);
269 272
270 273 static float MAX_VALUE() { return +1.0e10; }
271 274 static float MIN_VALUE() { return -1.0e10; }
272 275
273 276 float compute_heat() const;
274 277
275 278 void set_call(CallNode* call) { _call = call; }
276 279 void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }
277 280
278 281 // Do not queue very hot or very cold calls.
279 282 // Make very cold ones out of line immediately.
280 283 // Inline very hot ones immediately.
281 284 // These queries apply various tunable limits
282 285 // to the above metrics in a systematic way.
283 286 // Test for coldness before testing for hotness.
284 287 bool is_cold() const;
285 288 bool is_hot() const;
286 289
287 290 // Force a warm call to be hot. This worklists the call node for inlining.
288 291 void make_hot();
289 292
290 293 // Force a warm call to be cold. This worklists the call node for out-of-lining.
291 294 void make_cold();
292 295
293 296 // A reproducible total ordering, in which heat is the major key.
294 297 bool warmer_than(WarmCallInfo* that);
295 298
296 299 // List management. These methods are called with the list head,
297 300 // and return the new list head, inserting or removing the receiver.
298 301 WarmCallInfo* insert_into(WarmCallInfo* head);
299 302 WarmCallInfo* remove_from(WarmCallInfo* head);
300 303
301 304 #ifndef PRODUCT
302 305 void print() const;
303 306 void print_all() const;
304 307 int count_all() const;
305 308 #endif
306 309 };
307 310
308 311 #endif // SHARE_VM_OPTO_CALLGENERATOR_HPP