rev 1025 : imported patch indy.compiler.patch
rev 1026 : imported patch indy.compiler.inline.patch
--- old/src/share/vm/opto/callGenerator.hpp
+++ new/src/share/vm/opto/callGenerator.hpp
1 1 /*
2 2 * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 //---------------------------CallGenerator-------------------------------------
26 26 // The subclasses of this class handle generation of ideal nodes for
27 27 // call sites and method entry points.
28 28
29 29 class CallGenerator : public ResourceObj {
30 30 public:
31 31 enum {
32 32 xxxunusedxxx
33 33 };
34 34
35 35 private:
36 36 ciMethod* _method; // The method being called.
37 37
38 38 protected:
39 39 CallGenerator(ciMethod* method);
40 40
41 41 public:
42 42 // Accessors
43 43 ciMethod* method() const { return _method; }
44 44
45 45 // is_inline: At least some code implementing the method is copied here.
46 46 virtual bool is_inline() const { return false; }
47 47 // is_intrinsic: There's a method-specific way of generating the inline code.
48 48 virtual bool is_intrinsic() const { return false; }
49 49 // is_parse: Bytecodes implementing the specific method are copied here.
50 50 virtual bool is_parse() const { return false; }
51 51 // is_virtual: The call uses the receiver type to select or check the method.
52 52 virtual bool is_virtual() const { return false; }
53 53 // is_deferred: The decision whether to inline or not is deferred.
54 54 virtual bool is_deferred() const { return false; }
55 55 // is_predicted: Uses an explicit check against a predicted type.
56 56 virtual bool is_predicted() const { return false; }
57 57 // is_trap: Does not return to the caller. (E.g., uncommon trap.)
58 58 virtual bool is_trap() const { return false; }
59 59
60 60 // Note: It is possible for a CG to be both inline and virtual.
61 61 // (The hashCode intrinsic does a vtable check and an inlined fast path.)
62 62
63 63 // Utilities:
64 64 const TypeFunc* tf() const;
65 65
66 66 // The given jvms has state and arguments for a call to my method.
67 67 // Edges after jvms->argoff() carry all (pre-popped) argument values.
68 68 //
69 69 // Update the map with state and return values (if any) and return it.
70 70 // The return values (0, 1, or 2) must be pushed on the map's stack,
71 71 // and the sp of the jvms incremented accordingly.
72 72 //
73 73 // The jvms is returned on success. Alternatively, a copy of the
74 74 // given jvms, suitably updated, may be returned, in which case the
75 75 // caller should discard the original jvms.
76 76 //
77 77 // The non-Parm edges of the returned map will contain updated global state,
78 78 // and one or two edges before jvms->sp() will carry any return values.
79 79 // Other map edges may contain locals or monitors, and should not
80 80 // be changed in meaning.
81 81 //
82 82 // If the call traps, the returned map must have a control edge of top.
83 83 // If the call can throw, the returned map must report has_exceptions().
84 84 //
85 85 // If the result is NULL, it means that this CallGenerator was unable
86 86 // to handle the given call, and another CallGenerator should be consulted.
87 87 virtual JVMState* generate(JVMState* jvms) = 0;
88 88
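  // A minimal usage sketch (editorial illustration, not part of this patch):
  // a caller consults a chain of CallGenerators under the contract above.
  // The locals 'callee', 'expected_uses' and 'jvms' are assumed here.
  //
  //   CallGenerator* cg = CallGenerator::for_intrinsic(callee);
  //   if (cg == NULL)  cg = CallGenerator::for_inline(callee, expected_uses);
  //   JVMState* new_jvms = cg->generate(jvms);
  //   if (new_jvms == NULL) {
  //     // This generator could not handle the call; fall back to a
  //     // vanilla out-of-line call site.
  //     new_jvms = CallGenerator::for_direct_call(callee)->generate(jvms);
  //   }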
89 89 // How to generate a call site that is inlined:
90 90 static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
91 91 // How to generate code for an on-stack replacement handler.
92 92 static CallGenerator* for_osr(ciMethod* m, int osr_bci);
93 93
94 94 // How to generate vanilla out-of-line call sites:
95 95 static CallGenerator* for_direct_call(ciMethod* m); // static, special
96 96 static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
97 97 static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
98 98
99 99 // How to make a call but defer the decision whether to inline or not.
100 100 static CallGenerator* for_warm_call(WarmCallInfo* ci,
101 101 CallGenerator* if_cold,
102 102 CallGenerator* if_hot);
103 103
104 104 // How to make a call that optimistically assumes a receiver type:
105 105 static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
106 106 CallGenerator* if_missed,
107 107 CallGenerator* if_hit,
108 108 float hit_prob);
109 109
110 + // How to make a call that optimistically assumes a MethodHandle target:
111 + static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
112 + CallGenerator* if_missed,
113 + CallGenerator* if_hit,
114 + float hit_prob);
115 +
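  // A hedged sketch of how the new factory above might be used for an
  // invokedynamic site whose profile shows one dominant method handle
  // (the names 'predicted_mh', 'target', 'callee' and 'hit_prob' are
  // assumed locals, not part of this patch):
  //
  //   CallGenerator* hit_cg  = CallGenerator::for_inline(target);
  //   CallGenerator* miss_cg = CallGenerator::for_dynamic_call(callee);
  //   CallGenerator* cg = CallGenerator::for_predicted_dynamic_call(
  //                           predicted_mh, miss_cg, hit_cg, hit_prob);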
110 116 // How to make a call that gives up and goes back to the interpreter:
111 117 static CallGenerator* for_uncommon_trap(ciMethod* m,
112 118 Deoptimization::DeoptReason reason,
113 119 Deoptimization::DeoptAction action);
114 120
115 121 // Registry for intrinsics:
116 122 static CallGenerator* for_intrinsic(ciMethod* m);
117 123 static void register_intrinsic(ciMethod* m, CallGenerator* cg);
118 124 };
119 125
120 126 class InlineCallGenerator : public CallGenerator {
121 127 virtual bool is_inline() const { return true; }
122 128
123 129 protected:
124 130 InlineCallGenerator(ciMethod* method) : CallGenerator(method) { }
125 131 };
126 132
127 133
128 134 //---------------------------WarmCallInfo--------------------------------------
129 135 // A struct to collect information about a given call site.
130 136 // Helps sort call sites into "hot", "medium", and "cold".
131 137 // Participates in the queueing of "medium" call sites for possible inlining.
132 138 class WarmCallInfo : public ResourceObj {
133 139 private:
134 140
135 141 CallNode* _call; // The CallNode which may be inlined.
136 142 CallGenerator* _hot_cg;// CG for expanding the call node
137 143
138 144 // These are the metrics we use to evaluate call sites:
139 145
140 146 float _count; // How often do we expect to reach this site?
141 147 float _profit; // How much time do we expect to save by inlining?
142 148 float _work; // How long do we expect the average call to take?
143 149 float _size; // How big do we expect the inlined code to be?
144 150
145 151 float _heat; // Combined score inducing total order on call sites.
146 152 WarmCallInfo* _next; // Next cooler call info in pending queue.
147 153
148 154 // Count is the number of times this call site is expected to be executed.
149 155 // Large count is favorable for inlining, because the extra compilation
150 156 // work will be amortized more completely.
151 157
152 158 // Profit is a rough measure of the amount of time we expect to save
153 159 // per execution of this site if we inline it. (1.0 == call overhead)
154 160 // Large profit favors inlining. Negative profit disables inlining.
155 161
156 162 // Work is a rough measure of the amount of time a typical out-of-line
157 163 // call from this site is expected to take. (1.0 == call, no-op, return)
158 164 // Small work is somewhat favorable for inlining, since methods with
159 165 // short "hot" traces are more likely to inline smoothly.
160 166
161 167 // Size is the number of graph nodes we expect this method to produce,
162 168 // not counting the inlining of any further warm calls it may include.
163 169 // Small size favors inlining, since small methods are more likely to
164 170 // inline smoothly. The size is estimated by examining the native code
165 171 // if available. The method bytecodes are also examined, assuming
166 172 // empirically observed node counts for each kind of bytecode.
167 173
168 174 // Heat is the combined "goodness" of a site's inlining. If we were
169 175 // omniscient, it would be the difference of two sums of future execution
170 176 // times of code emitted for this site (amortized across multiple sites if
171 177 // sharing applies). The two sums are for versions of this call site with
172 178 // and without inlining.
173 179
174 180 // We approximate this mythical quantity by playing with averages,
175 181 // rough estimates, and assumptions that history repeats itself.
176 182 // The basic formula count * profit is heuristically adjusted
177 183 // by looking at the expected compilation and execution times of
 178 184        //     the inlined call.
179 185
180 186 // Note: Some of these metrics may not be present in the final product,
181 187 // but exist in development builds to experiment with inline policy tuning.
182 188
183 189 // This heuristic framework does not model well the very significant
184 190 // effects of multiple-level inlining. It is possible to see no immediate
185 191 // profit from inlining X->Y, but to get great profit from a subsequent
186 192 // inlining X->Y->Z.
187 193
188 194 // This framework does not take well into account the problem of N**2 code
189 195 // size in a clique of mutually inlinable methods.
190 196
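  // An illustrative reading of the heat heuristic sketched above (an
  // editorial example, not the actual compute_heat() implementation):
  // start from count * profit and damp it as the expected inlined size or
  // per-call work grows, so large or slow callees rank lower in the queue.
  //
  //   float score = count * profit;             // raw benefit estimate
  //   score *= 1.0f / (1.0f + size / 100.0f);   // penalize big inlined bodies
  //   score *= 1.0f / (1.0f + work);            // penalize slow callees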
191 197 WarmCallInfo* next() const { return _next; }
192 198 void set_next(WarmCallInfo* n) { _next = n; }
193 199
194 200 static WarmCallInfo* _always_hot;
195 201 static WarmCallInfo* _always_cold;
196 202
197 203 public:
198 204 // Because WarmInfo objects live over the entire lifetime of the
199 205 // Compile object, they are allocated into the comp_arena, which
200 206 // does not get resource marked or reset during the compile process
201 207 void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
202 208 void operator delete( void * ) { } // fast deallocation
203 209
204 210 static WarmCallInfo* always_hot();
205 211 static WarmCallInfo* always_cold();
206 212
207 213 WarmCallInfo() {
208 214 _call = NULL;
209 215 _hot_cg = NULL;
210 216 _next = NULL;
211 217 _count = _profit = _work = _size = _heat = 0;
212 218 }
213 219
214 220 CallNode* call() const { return _call; }
215 221 float count() const { return _count; }
216 222 float size() const { return _size; }
217 223 float work() const { return _work; }
218 224 float profit() const { return _profit; }
219 225 float heat() const { return _heat; }
220 226
221 227 void set_count(float x) { _count = x; }
222 228 void set_size(float x) { _size = x; }
223 229 void set_work(float x) { _work = x; }
224 230 void set_profit(float x) { _profit = x; }
225 231 void set_heat(float x) { _heat = x; }
226 232
227 233 // Load initial heuristics from profiles, etc.
228 234 // The heuristics can be tweaked further by the caller.
229 235 void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);
230 236
231 237 static float MAX_VALUE() { return +1.0e10; }
232 238 static float MIN_VALUE() { return -1.0e10; }
233 239
234 240 float compute_heat() const;
235 241
236 242 void set_call(CallNode* call) { _call = call; }
237 243 void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }
238 244
239 245 // Do not queue very hot or very cold calls.
240 246 // Make very cold ones out of line immediately.
241 247 // Inline very hot ones immediately.
242 248 // These queries apply various tunable limits
243 249 // to the above metrics in a systematic way.
244 250 // Test for coldness before testing for hotness.
245 251 bool is_cold() const;
246 252 bool is_hot() const;
247 253
248 254 // Force a warm call to be hot. This worklists the call node for inlining.
249 255 void make_hot();
250 256
251 257 // Force a warm call to be cold. This worklists the call node for out-of-lining.
252 258 void make_cold();
253 259
254 260 // A reproducible total ordering, in which heat is the major key.
255 261 bool warmer_than(WarmCallInfo* that);
256 262
257 263 // List management. These methods are called with the list head,
258 264 // and return the new list head, inserting or removing the receiver.
259 265 WarmCallInfo* insert_into(WarmCallInfo* head);
260 266 WarmCallInfo* remove_from(WarmCallInfo* head);
261 267
262 268 #ifndef PRODUCT
263 269 void print() const;
264 270 void print_all() const;
265 271 int count_all() const;
266 272 #endif
267 273 };
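A minimal sketch of how a compiler pass might drive the WarmCallInfo API
declared above for a "medium" call site (assuming 'C', 'jvms', 'callee',
'profile', 'prof_factor' and 'queue_head' are in scope; this is an
illustration, not code from the patch):

    WarmCallInfo* ci = new (C) WarmCallInfo();    // allocated in the comp_arena
    ci->init(jvms, callee, profile, prof_factor); // seed metrics from profiles
    ci->set_heat(ci->compute_heat());             // combine count/profit/work/size
    if (ci->is_cold()) {                          // coldness is tested first
      ci->make_cold();                            // emit out-of-line immediately
    } else if (ci->is_hot()) {
      ci->make_hot();                             // worklist for inlining now
    } else {
      queue_head = ci->insert_into(queue_head);   // queue by decreasing heat
    }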
148 lines elided