Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/runtime/advancedThresholdPolicy.cpp
+++ new/src/share/vm/runtime/advancedThresholdPolicy.cpp
1 1 /*
2 2 * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "runtime/advancedThresholdPolicy.hpp"
27 27 #include "runtime/simpleThresholdPolicy.inline.hpp"
28 28
29 29 #ifdef TIERED
30 30 // Print an event.
31 31 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
32 32 int bci, CompLevel level) {
33 33 tty->print(" rate: ");
34 34 if (mh->prev_time() == 0) tty->print("n/a");
35 35 else tty->print("%f", mh->rate());
36 36
37 37 tty->print(" k: %.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
38 38 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
39 39
40 40 }
41 41
42 42 void AdvancedThresholdPolicy::initialize() {
43 43 // Turn on ergonomic compiler count selection
44 44 if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
45 45 FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
46 46 }
47 47 int count = CICompilerCount;
48 48 if (CICompilerCountPerCPU) {
49 49 // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
50 50 int log_cpu = log2_intptr(os::active_processor_count());
51 51 int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
52 52 count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
53 53 }
54 54
55 55 set_c1_count(MAX2(count / 3, 1));
56 56 set_c2_count(MAX2(count - count / 3, 1));
57 57
58 58 // Some inlining tuning
59 59 #ifdef X86
60 60 if (FLAG_IS_DEFAULT(InlineSmallCode)) {
61 61 FLAG_SET_DEFAULT(InlineSmallCode, 2000);
62 62 }
63 63 #endif
64 64
65 65 #ifdef SPARC
66 66 if (FLAG_IS_DEFAULT(InlineSmallCode)) {
67 67 FLAG_SET_DEFAULT(InlineSmallCode, 2500);
68 68 }
69 69 #endif
70 70
71 71
72 72 set_start_time(os::javaTimeMillis());
73 73 }
74 74
// Recompute m's event rate (invocations + backedges per millisecond) at
// time t, updating the prev_time/prev_event_count watermarks on the method.
// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, methodOop m) {
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just came out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running safepoint-free for at least TieredRateUpdateMinTime ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least TieredRateUpdateMinTime ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else
    if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
      // If nothing happened for TieredRateUpdateMaxTime ms (25ms by default),
      // zero the rate. Don't modify prev values.
      m->set_rate(0);
    }
  }
}
106 106
107 107 // Check if this method has been stale from a given number of milliseconds.
108 108 // See select_task().
109 109 bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, methodOop m) {
110 110 jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
111 111 jlong delta_t = t - m->prev_time();
112 112 if (delta_t > timeout && delta_s > timeout) {
113 113 int event_count = m->invocation_count() + m->backedge_count();
114 114 int delta_e = event_count - m->prev_event_count();
115 115 // Return true if there were no events.
116 116 return delta_e == 0;
117 117 }
118 118 return false;
119 119 }
120 120
121 121 // We don't remove old methods from the compile queue even if they have
122 122 // very low activity. See select_task().
123 123 bool AdvancedThresholdPolicy::is_old(methodOop method) {
124 124 return method->invocation_count() > 50000 || method->backedge_count() > 500000;
125 125 }
126 126
// Hotness weight used to order the compile queue: the event rate scaled by
// the product of the (offset-by-one) invocation and backedge counters.
// The +1 offsets keep a zero rate or zero counter from annihilating the weight.
double AdvancedThresholdPolicy::weight(methodOop method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}
130 130
131 131 // Apply heuristics and return true if x should be compiled before y
132 132 bool AdvancedThresholdPolicy::compare_methods(methodOop x, methodOop y) {
133 133 if (x->highest_comp_level() > y->highest_comp_level()) {
134 134 // recompilation after deopt
135 135 return true;
136 136 } else
137 137 if (x->highest_comp_level() == y->highest_comp_level()) {
138 138 if (weight(x) > weight(y)) {
139 139 return true;
140 140 }
141 141 }
142 142 return false;
143 143 }
144 144
145 145 // Is method profiled enough?
146 146 bool AdvancedThresholdPolicy::is_method_profiled(methodOop method) {
147 147 methodDataOop mdo = method->method_data();
148 148 if (mdo != NULL) {
149 149 int i = mdo->invocation_count_delta();
150 150 int b = mdo->backedge_count_delta();
151 151 return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
152 152 }
153 153 return false;
154 154 }
155 155
156 156 // Called with the queue locked and with at least one element
157 157 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
158 158 CompileTask *max_task = NULL;
159 159 methodOop max_method;
160 160 jlong t = os::javaTimeMillis();
161 161 // Iterate through the queue and find a method with a maximum rate.
162 162 for (CompileTask* task = compile_queue->first(); task != NULL;) {
163 163 CompileTask* next_task = task->next();
164 164 methodOop method = (methodOop)JNIHandles::resolve(task->method_handle());
165 165 methodDataOop mdo = method->method_data();
166 166 update_rate(t, method);
167 167 if (max_task == NULL) {
168 168 max_task = task;
169 169 max_method = method;
170 170 } else {
171 171 // If a method has been stale for some time, remove it from the queue.
172 172 if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
173 173 if (PrintTieredEvents) {
174 174 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
175 175 }
176 176 CompileTaskWrapper ctw(task); // Frees the task
177 177 compile_queue->remove(task);
178 178 method->clear_queued_for_compilation();
179 179 task = next_task;
180 180 continue;
181 181 }
↓ open down ↓ |
181 lines elided |
↑ open up ↑ |
182 182
183 183 // Select a method with a higher rate
184 184 if (compare_methods(method, max_method)) {
185 185 max_task = task;
186 186 max_method = method;
187 187 }
188 188 }
189 189 task = next_task;
190 190 }
191 191
192 - if (max_task->comp_level() == CompLevel_full_profile && is_method_profiled(max_method)) {
192 + if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
193 + && is_method_profiled(max_method)) {
193 194 max_task->set_comp_level(CompLevel_limited_profile);
194 195 if (PrintTieredEvents) {
195 196 print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
196 197 }
197 198 }
198 199
199 200 return max_task;
200 201 }
201 202
202 203 double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
203 204 double queue_size = CompileBroker::queue_size(level);
204 205 int comp_count = compiler_count(level);
205 206 double k = queue_size / (feedback_k * comp_count) + 1;
206 207 return k;
207 208 }
208 209
209 210 // Call and loop predicates determine whether a transition to a higher
210 211 // compilation level should be performed (pointers to predicate functions
211 212 // are passed to common()).
212 213 // Tier?LoadFeedback is basically a coefficient that determines of
213 214 // how many methods per compiler thread can be in the queue before
214 215 // the threshold values double.
215 216 bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
216 217 switch(cur_level) {
217 218 case CompLevel_none:
218 219 case CompLevel_limited_profile: {
219 220 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
220 221 return loop_predicate_helper<CompLevel_none>(i, b, k);
221 222 }
222 223 case CompLevel_full_profile: {
223 224 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
224 225 return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
225 226 }
226 227 default:
227 228 return true;
228 229 }
229 230 }
230 231
231 232 bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
232 233 switch(cur_level) {
233 234 case CompLevel_none:
234 235 case CompLevel_limited_profile: {
235 236 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
236 237 return call_predicate_helper<CompLevel_none>(i, b, k);
237 238 }
238 239 case CompLevel_full_profile: {
239 240 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
240 241 return call_predicate_helper<CompLevel_full_profile>(i, b, k);
241 242 }
242 243 default:
243 244 return true;
244 245 }
245 246 }
246 247
247 248 // If a method is old enough and is still in the interpreter we would want to
248 249 // start profiling without waiting for the compiled method to arrive.
249 250 // We also take the load on compilers into the account.
250 251 bool AdvancedThresholdPolicy::should_create_mdo(methodOop method, CompLevel cur_level) {
251 252 if (cur_level == CompLevel_none &&
252 253 CompileBroker::queue_size(CompLevel_full_optimization) <=
253 254 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
254 255 int i = method->invocation_count();
255 256 int b = method->backedge_count();
256 257 double k = Tier0ProfilingStartPercentage / 100.0;
257 258 return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
258 259 }
259 260 return false;
260 261 }
261 262
262 263 // Inlining control: if we're compiling a profiled method with C1 and the callee
263 264 // is known to have OSRed in a C2 version, don't inline it.
264 265 bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
265 266 CompLevel comp_level = (CompLevel)env->comp_level();
266 267 if (comp_level == CompLevel_full_profile ||
267 268 comp_level == CompLevel_limited_profile) {
268 269 return callee->highest_osr_comp_level() == CompLevel_full_optimization;
269 270 }
270 271 return false;
271 272 }
272 273
273 274 // Create MDO if necessary.
274 275 void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
275 276 if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
276 277 if (mh->method_data() == NULL) {
277 278 methodOopDesc::build_interpreter_method_data(mh, THREAD);
278 279 if (HAS_PENDING_EXCEPTION) {
279 280 CLEAR_PENDING_EXCEPTION;
280 281 }
281 282 }
282 283 }
283 284
284 285
285 286 /*
286 287 * Method states:
287 288 * 0 - interpreter (CompLevel_none)
288 289 * 1 - pure C1 (CompLevel_simple)
289 290 * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
290 291 * 3 - C1 with full profiling (CompLevel_full_profile)
291 292 * 4 - C2 (CompLevel_full_optimization)
292 293 *
293 294 * Common state transition patterns:
294 295 * a. 0 -> 3 -> 4.
295 296 * The most common path. But note that even in this straightforward case
296 297 * profiling can start at level 0 and finish at level 3.
297 298 *
298 299 * b. 0 -> 2 -> 3 -> 4.
299 300  * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
300 301 * into state 3 directly and over-profiling while a method is in the C2 queue we transition to
301 302 * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
302 303 *
303 304 * c. 0 -> (3->2) -> 4.
304 305 * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
305 306 * to enable the profiling to fully occur at level 0. In this case we change the compilation level
306 307 * of the method to 2, because it'll allow it to run much faster without full profiling while c2
307 308 * is compiling.
308 309 *
309 310 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
310 311 * After a method was once compiled with C1 it can be identified as trivial and be compiled to
311 312 * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1.
312 313 *
313 314 * e. 0 -> 4.
↓ open down ↓ |
111 lines elided |
↑ open up ↑ |
314 315 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
315 316 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
316 317 * the compiled version already exists).
317 318 *
318 319 * Note that since state 0 can be reached from any other state via deoptimization different loops
319 320 * are possible.
320 321 *
321 322 */
322 323
323 324 // Common transition function. Given a predicate determines if a method should transition to another level.
324 -CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
325 - if (is_trivial(method)) return CompLevel_simple;
326 -
325 +CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level, bool disable_feedback) {
327 326 CompLevel next_level = cur_level;
328 327 int i = method->invocation_count();
329 328 int b = method->backedge_count();
330 329
331 - switch(cur_level) {
332 - case CompLevel_none:
333 - // If we were at full profile level, would we switch to full opt?
334 - if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
335 - next_level = CompLevel_full_optimization;
336 - } else if ((this->*p)(i, b, cur_level)) {
337 - // C1-generated fully profiled code is about 30% slower than the limited profile
338 - // code that has only invocation and backedge counters. The observation is that
339 - // if C2 queue is large enough we can spend too much time in the fully profiled code
340 - // while waiting for C2 to pick the method from the queue. To alleviate this problem
341 - // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
342 - // we choose to compile a limited profiled version and then recompile with full profiling
343 - // when the load on C2 goes down.
344 - if (CompileBroker::queue_size(CompLevel_full_optimization) >
345 - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
346 - next_level = CompLevel_limited_profile;
347 - } else {
348 - next_level = CompLevel_full_profile;
330 + if (is_trivial(method)) {
331 + next_level = CompLevel_simple;
332 + } else {
333 + switch(cur_level) {
334 + case CompLevel_none:
335 + // If we were at full profile level, would we switch to full opt?
336 + if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
337 + next_level = CompLevel_full_optimization;
338 + } else if ((this->*p)(i, b, cur_level)) {
339 + // C1-generated fully profiled code is about 30% slower than the limited profile
340 + // code that has only invocation and backedge counters. The observation is that
341 + // if C2 queue is large enough we can spend too much time in the fully profiled code
342 + // while waiting for C2 to pick the method from the queue. To alleviate this problem
343 + // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
344 + // we choose to compile a limited profiled version and then recompile with full profiling
345 + // when the load on C2 goes down.
346 + if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
347 + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
348 + next_level = CompLevel_limited_profile;
349 + } else {
350 + next_level = CompLevel_full_profile;
351 + }
349 352 }
350 - }
351 - break;
352 - case CompLevel_limited_profile:
353 - if (is_method_profiled(method)) {
354 - // Special case: we got here because this method was fully profiled in the interpreter.
355 - next_level = CompLevel_full_optimization;
356 - } else {
357 - methodDataOop mdo = method->method_data();
358 - if (mdo != NULL) {
359 - if (mdo->would_profile()) {
360 - if (CompileBroker::queue_size(CompLevel_full_optimization) <=
361 - Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
362 - (this->*p)(i, b, cur_level)) {
363 - next_level = CompLevel_full_profile;
353 + break;
354 + case CompLevel_limited_profile:
355 + if (is_method_profiled(method)) {
356 + // Special case: we got here because this method was fully profiled in the interpreter.
357 + next_level = CompLevel_full_optimization;
358 + } else {
359 + methodDataOop mdo = method->method_data();
360 + if (mdo != NULL) {
361 + if (mdo->would_profile()) {
362 + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
363 + Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
364 + (this->*p)(i, b, cur_level))) {
365 + next_level = CompLevel_full_profile;
366 + }
367 + } else {
368 + next_level = CompLevel_full_optimization;
364 369 }
365 - } else {
366 - next_level = CompLevel_full_optimization;
367 370 }
368 371 }
369 - }
370 - break;
371 - case CompLevel_full_profile:
372 - {
373 - methodDataOop mdo = method->method_data();
374 - if (mdo != NULL) {
375 - if (mdo->would_profile()) {
376 - int mdo_i = mdo->invocation_count_delta();
377 - int mdo_b = mdo->backedge_count_delta();
378 - if ((this->*p)(mdo_i, mdo_b, cur_level)) {
372 + break;
373 + case CompLevel_full_profile:
374 + {
375 + methodDataOop mdo = method->method_data();
376 + if (mdo != NULL) {
377 + if (mdo->would_profile()) {
378 + int mdo_i = mdo->invocation_count_delta();
379 + int mdo_b = mdo->backedge_count_delta();
380 + if ((this->*p)(mdo_i, mdo_b, cur_level)) {
381 + next_level = CompLevel_full_optimization;
382 + }
383 + } else {
379 384 next_level = CompLevel_full_optimization;
380 385 }
381 - } else {
382 - next_level = CompLevel_full_optimization;
383 386 }
384 387 }
388 + break;
385 389 }
386 - break;
387 390 }
388 - return next_level;
391 + return MIN2(next_level, (CompLevel)TieredStopAtLevel);
389 392 }
390 393
391 394 // Determine if a method should be compiled with a normal entry point at a different level.
392 395 CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) {
393 396 CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
394 - common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level));
397 + common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
395 398 CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);
396 399
397 400 // If OSR method level is greater than the regular method level, the levels should be
398 401 // equalized by raising the regular method level in order to avoid OSRs during each
399 402 // invocation of the method.
400 403 if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
401 404 methodDataOop mdo = method->method_data();
402 405 guarantee(mdo != NULL, "MDO should not be NULL");
403 406 if (mdo->invocation_count() >= 1) {
404 407 next_level = CompLevel_full_optimization;
405 408 }
406 409 } else {
407 410 next_level = MAX2(osr_level, next_level);
408 411 }
409 -
410 412 return next_level;
411 413 }
412 414
413 415 // Determine if we should do an OSR compilation of a given method.
414 416 CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
415 - CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level);
417 + CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
416 418 if (cur_level == CompLevel_none) {
417 419 // If there is a live OSR method that means that we deopted to the interpreter
418 420 // for the transition.
419 421 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
420 422 if (osr_level > CompLevel_none) {
421 423 return osr_level;
422 424 }
423 425 }
424 426 return next_level;
425 427 }
426 428
427 429 // Update the rate and submit compile
428 430 void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
429 431 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
430 432 update_rate(os::javaTimeMillis(), mh());
431 433 CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
432 434 }
433 435
434 436 // Handle the invocation event.
435 437 void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
436 438 CompLevel level, nmethod* nm, TRAPS) {
437 439 if (should_create_mdo(mh(), level)) {
438 440 create_mdo(mh, THREAD);
439 441 }
440 442 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
441 443 CompLevel next_level = call_event(mh(), level);
442 444 if (next_level != level) {
443 445 compile(mh, InvocationEntryBci, next_level, THREAD);
444 446 }
445 447 }
446 448 }
447 449
448 450 // Handle the back branch event. Notice that we can compile the method
449 451 // with a regular entry from here.
450 452 void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
451 453 int bci, CompLevel level, nmethod* nm, TRAPS) {
452 454 if (should_create_mdo(mh(), level)) {
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
453 455 create_mdo(mh, THREAD);
454 456 }
455 457 // Check if MDO should be created for the inlined method
456 458 if (should_create_mdo(imh(), level)) {
457 459 create_mdo(imh, THREAD);
458 460 }
459 461
460 462 if (is_compilation_enabled()) {
461 463 CompLevel next_osr_level = loop_event(imh(), level);
462 464 CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
463 - if (next_osr_level == CompLevel_limited_profile) {
464 - next_osr_level = CompLevel_full_profile; // OSRs are supposed to be for very hot methods.
465 - }
466 -
467 465 // At the very least compile the OSR version
468 - if (!CompileBroker::compilation_is_in_queue(imh, bci)) {
469 - // Check if there's a method like that already
470 - nmethod* osr_nm = NULL;
471 - if (max_osr_level >= next_osr_level) {
472 - // There is an osr method already with the same
473 - // or greater level, check if it has the bci we need
474 - osr_nm = imh->lookup_osr_nmethod_for(bci, next_osr_level, false);
475 - }
476 - if (osr_nm == NULL) {
477 - compile(imh, bci, next_osr_level, THREAD);
478 - }
466 + if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
467 + compile(imh, bci, next_osr_level, THREAD);
479 468 }
480 469
481 470 // Use loop event as an opportunity to also check if there's been
482 471 // enough calls.
483 472 CompLevel cur_level, next_level;
484 473 if (mh() != imh()) { // If there is an enclosing method
485 474 guarantee(nm != NULL, "Should have nmethod here");
486 475 cur_level = comp_level(mh());
487 476 next_level = call_event(mh(), cur_level);
488 477
489 478 if (max_osr_level == CompLevel_full_optimization) {
490 479 // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
491 480 bool make_not_entrant = false;
492 481 if (nm->is_osr_method()) {
493 482 // This is an osr method, just make it not entrant and recompile later if needed
494 483 make_not_entrant = true;
495 484 } else {
496 485 if (next_level != CompLevel_full_optimization) {
497 486 // next_level is not full opt, so we need to recompile the
498 487 // enclosing method without the inlinee
499 488 cur_level = CompLevel_none;
500 489 make_not_entrant = true;
501 490 }
502 491 }
503 492 if (make_not_entrant) {
504 493 if (PrintTieredEvents) {
505 494 int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
506 495 print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
507 496 }
508 497 nm->make_not_entrant();
509 498 }
510 499 }
511 500 if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
512 501 // Fix up next_level if necessary to avoid deopts
513 502 if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
514 503 next_level = CompLevel_full_profile;
515 504 }
516 505 if (cur_level != next_level) {
517 506 compile(mh, InvocationEntryBci, next_level, THREAD);
518 507 }
519 508 }
520 509 } else {
521 510 cur_level = comp_level(imh());
522 511 next_level = call_event(imh(), cur_level);
523 512 if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
524 513 compile(imh, InvocationEntryBci, next_level, THREAD);
525 514 }
526 515 }
527 516 }
528 517 }
529 518
530 519 #endif // TIERED
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX