7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "compiler/compileTask.hpp"
28 #include "runtime/advancedThresholdPolicy.hpp"
29 #include "runtime/simpleThresholdPolicy.inline.hpp"
30 #if INCLUDE_JVMCI
31 #include "jvmci/jvmciRuntime.hpp"
32 #endif
33
34 #ifdef TIERED
35 // Print an event.
36 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
37 int bci, CompLevel level) {
38 tty->print(" rate=");
39 if (mh->prev_time() == 0) tty->print("n/a");
40 else tty->print("%f", mh->rate());
41
42 tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
43 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
44
45 }
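// For example, a PrintTieredEvents trace produced by the code above might
// contain a fragment like " rate=12.500000 k=1.00,1.00" (illustrative numbers
// only, assuming a method rate of 12.5 and threshold_scale() returning 1.0 for
// both tiers while the compile queues are short).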
46
47 void AdvancedThresholdPolicy::initialize() {
189 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
190 CompileTask *max_blocking_task = NULL;
191 CompileTask *max_task = NULL;
192 Method* max_method = NULL;
193 jlong t = os::javaTimeMillis();
194 // Iterate through the queue and find a method with a maximum rate.
195 for (CompileTask* task = compile_queue->first(); task != NULL;) {
196 CompileTask* next_task = task->next();
197 Method* method = task->method();
198 update_rate(t, method);
199 if (max_task == NULL) {
200 max_task = task;
201 max_method = method;
202 } else {
203 // If a method has been stale for some time, remove it from the queue.
204 // Blocking tasks and tasks submitted from the whitebox API don't become stale.
205 if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
206 if (PrintTieredEvents) {
207 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
208 }
209 task->log_task_dequeued("stale");
210 compile_queue->remove_and_mark_stale(task);
211 method->clear_queued_for_compilation();
212 task = next_task;
213 continue;
214 }
215
216 // Select a method with a higher rate
217 if (compare_methods(method, max_method)) {
218 max_task = task;
219 max_method = method;
220 }
221 }
222
223 if (task->is_blocking()) {
224 if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
225 max_blocking_task = task;
226 }
227 }
228
229 task = next_task;
259 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
260 // The main intention is to keep enough free space for C2 compiled code
261 // to achieve peak performance if the code cache is under stress.
262 if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
263 double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
264 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
265 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
266 }
267 }
268 return k;
269 }
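
// A rough worked example of the scaling above: once the current reverse free
// ratio exceeds _increase_threshold_at_ratio, k grows exponentially with the
// difference. Exceeding the trigger ratio by 1.0 multiplies k by exp(1.0) ~= 2.72;
// exceeding it by 2.0 multiplies k by exp(2.0) ~= 7.39. In other words, past the
// IncreaseFirstTierCompileThresholdAt trigger point, C1 compilation thresholds
// rise sharply as the code cache fills up (the trigger ratio itself is computed
// in code that is not shown here).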
270
271 // Call and loop predicates determine whether a transition to a higher
272 // compilation level should be performed (pointers to predicate functions
273 // are passed to common()).
274 // Tier?LoadFeedback is basically a coefficient that determines
275 // how many methods per compiler thread can be in the queue before
276 // the threshold values double.
277 bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
278 switch(cur_level) {
279 case CompLevel_none:
280 case CompLevel_limited_profile: {
281 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
282 return loop_predicate_helper<CompLevel_none>(i, b, k, method);
283 }
284 case CompLevel_full_profile: {
285 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
286 return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
287 }
288 default:
289 return true;
290 }
291 }
292
293 bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
294 switch(cur_level) {
295 case CompLevel_none:
296 case CompLevel_limited_profile: {
297 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
298 return call_predicate_helper<CompLevel_none>(i, b, k, method);
299 }
300 case CompLevel_full_profile: {
301 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
302 return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
303 }
304 default:
305 return true;
306 }
307 }
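
// A rough worked example of the load feedback described above (the exact
// formula lives in the threshold_scale() body, which is not shown here, so the
// numbers are only illustrative): with a hypothetical Tier4LoadFeedback of 3
// and two C2 compiler threads, a C2 queue of about 6 methods (3 per thread)
// would double the Tier-4 thresholds that call_predicate_helper() and
// loop_predicate_helper() compare against, and a longer queue would raise them
// further, so transitions to full optimization are requested less eagerly
// while C2 is backed up.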
308
309 // If a method is old enough and is still in the interpreter, we want to
310 // start profiling without waiting for the compiled method to arrive.
311 // We also take the load on the compilers into account.
312 bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
313 if (cur_level == CompLevel_none &&
314 CompileBroker::queue_size(CompLevel_full_optimization) <=
377 * e. 0 -> 4.
378 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
379 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
380 * the compiled version already exists).
381 *
382 * Note that since state 0 can be reached from any other state via deoptimization, different loops
383 * are possible.
384 *
385 */
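
// For reference, the numeric states used in the transitions above correspond to
// the CompLevel constants: 0 - CompLevel_none (interpreter), 1 - CompLevel_simple
// (C1 without profiling), 2 - CompLevel_limited_profile (C1 with invocation and
// backedge counters), 3 - CompLevel_full_profile (C1 with full MDO profiling),
// 4 - CompLevel_full_optimization (C2 or JVMCI). "0 -> 4" therefore denotes a
// direct interpreter-to-C2 transition.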
386
387 // Common transition function. Given a predicate, determines whether a method should transition to another level.
388 CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
389 CompLevel next_level = cur_level;
390 int i = method->invocation_count();
391 int b = method->backedge_count();
392
393 if (is_trivial(method)) {
394 next_level = CompLevel_simple;
395 } else {
396 switch(cur_level) {
397 case CompLevel_none:
398 // If we were at full profile level, would we switch to full opt?
399 if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
400 next_level = CompLevel_full_optimization;
401 } else if ((this->*p)(i, b, cur_level, method)) {
402 #if INCLUDE_JVMCI
403 if (UseJVMCICompiler) {
404 // Since JVMCI takes a while to warm up, its queue inevitably backs up during
405 // early VM execution.
406 next_level = CompLevel_full_profile;
407 break;
408 }
409 #endif
410 // C1-generated fully profiled code is about 30% slower than the limited profile
411 // code that has only invocation and backedge counters. The observation is that
412 // if the C2 queue is large enough, we can spend too much time in the fully profiled code
413 // while waiting for C2 to pick the method from the queue. To alleviate this problem,
414 // we introduce feedback based on the C2 queue size. If the C2 queue is sufficiently long,
415 // we choose to compile a limited-profile version first and then recompile with full profiling
416 // when the load on C2 goes down.
417 if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
418 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
419 next_level = CompLevel_limited_profile;
420 } else {
421 next_level = CompLevel_full_profile;
422 }
423 }
424 break;
425 case CompLevel_limited_profile:
426 if (is_method_profiled(method)) {
427 // Special case: we got here because this method was fully profiled in the interpreter.
428 next_level = CompLevel_full_optimization;
429 } else {
430 MethodData* mdo = method->method_data();
431 if (mdo != NULL) {
432 if (mdo->would_profile()) {
433 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
434 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
435 (this->*p)(i, b, cur_level, method))) {
436 next_level = CompLevel_full_profile;
437 }
438 } else {
439 next_level = CompLevel_full_optimization;
440 }
441 }
442 }
443 break;
444 case CompLevel_full_profile:
445 {
446 MethodData* mdo = method->method_data();
447 if (mdo != NULL) {
448 if (mdo->would_profile()) {
449 int mdo_i = mdo->invocation_count_delta();
450 int mdo_b = mdo->backedge_count_delta();
451 if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
452 next_level = CompLevel_full_optimization;
453 }
454 } else {
455 next_level = CompLevel_full_optimization;
456 }
457 }
458 }
459 break;
460 }
497 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
498 if (osr_level > CompLevel_none) {
499 return osr_level;
500 }
501 }
502 #if INCLUDE_JVMCI
503 if (UseJVMCICompiler) {
504 next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread);
505 }
506 #endif
507 return next_level;
508 }
509
510 // Update the rate and submit compile
511 void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
512 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
513 update_rate(os::javaTimeMillis(), mh());
514 CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
515 }
516
517 // Handle the invocation event.
518 void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
519 CompLevel level, CompiledMethod* nm, JavaThread* thread) {
520 if (should_create_mdo(mh(), level)) {
521 create_mdo(mh, thread);
522 }
523 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
524 CompLevel next_level = call_event(mh(), level, thread);
525 if (next_level != level) {
526 compile(mh, InvocationEntryBci, next_level, thread);
527 }
528 }
529 }
530
531 // Handle the back branch event. Notice that we can compile the method
532 // with a regular entry from here.
533 void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
534 int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
535 if (should_create_mdo(mh(), level)) {
536 create_mdo(mh, thread);
537 }
538 // Check if MDO should be created for the inlined method
539 if (should_create_mdo(imh(), level)) {
540 create_mdo(imh, thread);
541 }
542
543 if (is_compilation_enabled()) {
544 CompLevel next_osr_level = loop_event(imh(), level, thread);
545 CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
546 // At the very least compile the OSR version
547 if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
548 compile(imh, bci, next_osr_level, thread);
549 }
550
551 // Use the loop event as an opportunity to also check if there have been
552 // enough calls.
553 CompLevel cur_level, next_level;
554 if (mh() != imh()) { // If there is an enclosing method
555 guarantee(nm != NULL, "Should have nmethod here");
556 cur_level = comp_level(mh());
557 next_level = call_event(mh(), cur_level, thread);
558
559 if (max_osr_level == CompLevel_full_optimization) {
560 // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts.
561 bool make_not_entrant = false;
562 if (nm->is_osr_method()) {
563 // This is an osr method, just make it not entrant and recompile later if needed
564 make_not_entrant = true;
565 } else {
566 if (next_level != CompLevel_full_optimization) {
567 // next_level is not full opt, so we need to recompile the
568 // enclosing method without the inlinee
569 cur_level = CompLevel_none;
570 make_not_entrant = true;
571 }
572 }
573 if (make_not_entrant) {
574 if (PrintTieredEvents) {
575 int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
576 print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
577 }
578 nm->make_not_entrant();
579 }
580 }
581 if (!CompileBroker::compilation_is_in_queue(mh)) {
582 // Fix up next_level if necessary to avoid deopts
583 if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
584 next_level = CompLevel_full_profile;
585 }
586 if (cur_level != next_level) {
587 compile(mh, InvocationEntryBci, next_level, thread);
588 }
589 }
590 } else {
591 cur_level = comp_level(imh());
592 next_level = call_event(imh(), cur_level, thread);
593 if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
594 compile(imh, InvocationEntryBci, next_level, thread);
595 }
596 }
597 }
598 }
599
600 #endif // TIERED
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "runtime/advancedThresholdPolicy.hpp"
28 #include "runtime/simpleThresholdPolicy.inline.hpp"
29 #if INCLUDE_JVMCI
30 #include "jvmci/jvmciRuntime.hpp"
31 #endif
32
33 #ifdef TIERED
34 // Print an event.
35 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
36 int bci, CompLevel level) {
37 tty->print(" rate=");
38 if (mh->prev_time() == 0) tty->print("n/a");
39 else tty->print("%f", mh->rate());
40
41 tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
42 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
43
44 }
45
46 void AdvancedThresholdPolicy::initialize() {
188 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
189 CompileTask *max_blocking_task = NULL;
190 CompileTask *max_task = NULL;
191 Method* max_method = NULL;
192 jlong t = os::javaTimeMillis();
193 // Iterate through the queue and find a method with a maximum rate.
194 for (CompileTask* task = compile_queue->first(); task != NULL;) {
195 CompileTask* next_task = task->next();
196 Method* method = task->method();
197 update_rate(t, method);
198 if (max_task == NULL) {
199 max_task = task;
200 max_method = method;
201 } else {
202 // If a method has been stale for some time, remove it from the queue.
203 // Blocking tasks and tasks submitted from the whitebox API don't become stale.
204 if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
205 if (PrintTieredEvents) {
206 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
207 }
208 compile_queue->remove_and_mark_stale(task);
209 method->clear_queued_for_compilation();
210 task = next_task;
211 continue;
212 }
213
214 // Select a method with a higher rate
215 if (compare_methods(method, max_method)) {
216 max_task = task;
217 max_method = method;
218 }
219 }
220
221 if (task->is_blocking()) {
222 if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
223 max_blocking_task = task;
224 }
225 }
226
227 task = next_task;
257 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
258 // The main intention is to keep enough free space for C2 compiled code
259 // to achieve peak performance if the code cache is under stress.
260 if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
261 double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
262 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
263 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
264 }
265 }
266 return k;
267 }
268
269 // Call and loop predicates determine whether a transition to a higher
270 // compilation level should be performed (pointers to predicate functions
271 // are passed to common()).
272 // Tier?LoadFeedback is basically a coefficient that determines
273 // how many methods per compiler thread can be in the queue before
274 // the threshold values double.
275 bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
276 switch(cur_level) {
277 case CompLevel_aot: {
278 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
279 return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
280 }
281 case CompLevel_none:
282 case CompLevel_limited_profile: {
283 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
284 return loop_predicate_helper<CompLevel_none>(i, b, k, method);
285 }
286 case CompLevel_full_profile: {
287 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
288 return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
289 }
290 default:
291 return true;
292 }
293 }
294
295 bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
296 switch(cur_level) {
297 case CompLevel_aot: {
298 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
299 return call_predicate_helper<CompLevel_aot>(i, b, k, method);
300 }
301 case CompLevel_none:
302 case CompLevel_limited_profile: {
303 double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
304 return call_predicate_helper<CompLevel_none>(i, b, k, method);
305 }
306 case CompLevel_full_profile: {
307 double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
308 return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
309 }
310 default:
311 return true;
312 }
313 }
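
// Note that the CompLevel_aot cases above reuse the CompLevel_full_profile /
// Tier3LoadFeedback scale, i.e. AOT code moves up to profiled C1 code under the
// same C1-queue load feedback as interpreted code. As an illustrative example
// (assuming the same "methods per compiler thread" scaling described above),
// with a hypothetical Tier3LoadFeedback of 5 and a single C1 compiler thread,
// a C1 queue of about 5 methods would double the thresholds an AOT-compiled
// method must reach before a tier 3 recompilation is requested.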
314
315 // If a method is old enough and is still in the interpreter, we want to
316 // start profiling without waiting for the compiled method to arrive.
317 // We also take the load on the compilers into account.
318 bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
319 if (cur_level == CompLevel_none &&
320 CompileBroker::queue_size(CompLevel_full_optimization) <=
383 * e. 0 -> 4.
384 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
385 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
386 * the compiled version already exists).
387 *
388 * Note that since state 0 can be reached from any other state via deoptimization, different loops
389 * are possible.
390 *
391 */
392
393 // Common transition function. Given a predicate, determines whether a method should transition to another level.
394 CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
395 CompLevel next_level = cur_level;
396 int i = method->invocation_count();
397 int b = method->backedge_count();
398
399 if (is_trivial(method)) {
400 next_level = CompLevel_simple;
401 } else {
402 switch(cur_level) {
403 case CompLevel_aot: {
404 // If we were at full profile level, would we switch to full opt?
405 if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
406 next_level = CompLevel_full_optimization;
407 } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
408 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
409 (this->*p)(i, b, cur_level, method))) {
410 next_level = CompLevel_full_profile;
411 }
412 }
413 break;
414 case CompLevel_none:
415 // If we were at full profile level, would we switch to full opt?
416 if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
417 next_level = CompLevel_full_optimization;
418 } else if ((this->*p)(i, b, cur_level, method)) {
419 #if INCLUDE_JVMCI
420 if (EnableJVMCI && UseJVMCICompiler) {
421 // Since JVMCI takes a while to warm up, its queue inevitably backs up during
422 // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
423 // compilation method and all potential inlinees have mature profiles (which
424 // includes type profiling). If it sees immature profiles, JVMCI's inliner
425 // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to
426 // exploring/inlining too many graphs). Since a rewrite of the inliner is
427 // in progress, we simply disable the dialing-back heuristic for now and will
428 // revisit this decision once the new inliner is completed.
429 next_level = CompLevel_full_profile;
430 } else
431 #endif
432 {
433 // C1-generated fully profiled code is about 30% slower than the limited profile
434 // code that has only invocation and backedge counters. The observation is that
435 // if the C2 queue is large enough, we can spend too much time in the fully profiled code
436 // while waiting for C2 to pick the method from the queue. To alleviate this problem,
437 // we introduce feedback based on the C2 queue size. If the C2 queue is sufficiently long,
438 // we choose to compile a limited-profile version first and then recompile with full profiling
439 // when the load on C2 goes down.
440 if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
441 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
442 next_level = CompLevel_limited_profile;
443 } else {
444 next_level = CompLevel_full_profile;
445 }
446 }
447 }
448 break;
449 case CompLevel_limited_profile:
450 if (is_method_profiled(method)) {
451 // Special case: we got here because this method was fully profiled in the interpreter.
452 next_level = CompLevel_full_optimization;
453 } else {
454 MethodData* mdo = method->method_data();
455 if (mdo != NULL) {
456 if (mdo->would_profile()) {
457 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
458 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
459 (this->*p)(i, b, cur_level, method))) {
460 next_level = CompLevel_full_profile;
461 }
462 } else {
463 next_level = CompLevel_full_optimization;
464 }
465 } else {
466 // If there is no MDO, we need to profile
467 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
468 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
469 (this->*p)(i, b, cur_level, method))) {
470 next_level = CompLevel_full_profile;
471 }
472 }
473 }
474 break;
475 case CompLevel_full_profile:
476 {
477 MethodData* mdo = method->method_data();
478 if (mdo != NULL) {
479 if (mdo->would_profile()) {
480 int mdo_i = mdo->invocation_count_delta();
481 int mdo_b = mdo->backedge_count_delta();
482 if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
483 next_level = CompLevel_full_optimization;
484 }
485 } else {
486 next_level = CompLevel_full_optimization;
487 }
488 }
489 }
490 break;
491 }
528 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
529 if (osr_level > CompLevel_none) {
530 return osr_level;
531 }
532 }
533 #if INCLUDE_JVMCI
534 if (UseJVMCICompiler) {
535 next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread);
536 }
537 #endif
538 return next_level;
539 }
540
541 // Update the rate and submit compile
542 void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
543 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
544 update_rate(os::javaTimeMillis(), mh());
545 CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
546 }
547
548 bool AdvancedThresholdPolicy::maybe_switch_to_aot(methodHandle mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
549 if (UseAOT && !delay_compilation_during_startup()) {
550 if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
551 // If the current level is full profile or interpreter and we're switching to any other level,
552 // re-activate the AOT code first so that we won't waste time overprofiling.
553 compile(mh, InvocationEntryBci, CompLevel_aot, thread);
554 // Fall through for JIT compilation.
555 }
556 if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
557 // If the next level is limited profile, use the aot code (if there is any),
558 // since it's essentially the same thing.
559 compile(mh, InvocationEntryBci, CompLevel_aot, thread);
560 // No need to JIT, we're done.
561 return true;
562 }
563 }
564 return false;
565 }
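
// In effect (summarizing the two branches above): when a method is about to
// leave the interpreter or full-profile C1 code, a compile request at
// CompLevel_aot is issued first, so that any existing AOT version keeps the
// method running reasonably fast while the JIT request is queued; and when the
// desired next level is only limited-profile C1 code and AOT code exists, that
// AOT code is considered good enough on its own and the function returns true
// so that no JIT compilation is requested at all. For example,
// method_invocation_event() below uses it as:
//
//   CompLevel next_level = call_event(mh(), level, thread);
//   if (next_level != level) {
//     if (maybe_switch_to_aot(mh, level, next_level, thread)) {
//       return; // no JIT compilation needed
//     }
//     ...
//   }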
566
567
568 // Handle the invocation event.
569 void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
570 CompLevel level, CompiledMethod* nm, JavaThread* thread) {
571 if (should_create_mdo(mh(), level)) {
572 create_mdo(mh, thread);
573 }
574 CompLevel next_level = call_event(mh(), level, thread);
575 if (next_level != level) {
576 if (maybe_switch_to_aot(mh, level, next_level, thread)) {
577 // No JITting necessary
578 return;
579 }
580 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
581 compile(mh, InvocationEntryBci, next_level, thread);
582 }
583 }
584 }
585
586 // Handle the back branch event. Notice that we can compile the method
587 // with a regular entry from here.
588 void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
589 int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
590 if (should_create_mdo(mh(), level)) {
591 create_mdo(mh, thread);
592 }
593 // Check if MDO should be created for the inlined method
594 if (should_create_mdo(imh(), level)) {
595 create_mdo(imh, thread);
596 }
597
598 if (is_compilation_enabled()) {
599 CompLevel next_osr_level = loop_event(imh(), level, thread);
600 CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
601 // At the very least compile the OSR version
602 if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
603 compile(imh, bci, next_osr_level, thread);
604 }
605
606 // Use the loop event as an opportunity to also check if there have been
607 // enough calls.
608 CompLevel cur_level, next_level;
609 if (mh() != imh()) { // If there is an enclosing method
610 if (level == CompLevel_aot) {
611 // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
612 if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
613 compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
614 }
615 } else {
616 // Current loop event level is not AOT
617 guarantee(nm != NULL, "Should have nmethod here");
618 cur_level = comp_level(mh());
619 next_level = call_event(mh(), cur_level, thread);
620
621 if (max_osr_level == CompLevel_full_optimization) {
622 // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts.
623 bool make_not_entrant = false;
624 if (nm->is_osr_method()) {
625 // This is an osr method, just make it not entrant and recompile later if needed
626 make_not_entrant = true;
627 } else {
628 if (next_level != CompLevel_full_optimization) {
629 // next_level is not full opt, so we need to recompile the
630 // enclosing method without the inlinee
631 cur_level = CompLevel_none;
632 make_not_entrant = true;
633 }
634 }
635 if (make_not_entrant) {
636 if (PrintTieredEvents) {
637 int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
638 print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
639 }
640 nm->make_not_entrant();
641 }
642 }
643 // Fix up next_level if necessary to avoid deopts
644 if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
645 next_level = CompLevel_full_profile;
646 }
647 if (cur_level != next_level) {
648 if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
649 compile(mh, InvocationEntryBci, next_level, thread);
650 }
651 }
652 }
653 } else {
654 cur_level = comp_level(mh());
655 next_level = call_event(mh(), cur_level, thread);
656 if (next_level != cur_level) {
657 if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
658 compile(mh, InvocationEntryBci, next_level, thread);
659 }
660 }
661 }
662 }
663 }
664
665 #endif // TIERED