/*
 * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

}

void AdvancedThresholdPolicy::initialize() {
  int count = CICompilerCount;
#ifdef _LP64
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
    // Cap the number of compiler threads based on the reserved code cache size: allow roughly
    // one compiler thread per 32 MB of ReservedCodeCacheSize (but at least 2), so configuring
    // a larger code cache enables more compiler threads.
    int CodeCacheSize_based_thread_limit = MAX2((int)(ReservedCodeCacheSize / (32 * M)), 2);
    count = MIN2(count, CodeCacheSize_based_thread_limit);
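    // Illustrative example (numbers for exposition only): with 16 active processors,
    // log_cpu = 4 and loglog_cpu = 2, so count = 4 * 2 * 3 / 2 = 12; a 240 MB
    // ReservedCodeCacheSize then caps this at 240 / 32 = 7 compiler threads.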
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#else
  // On 32-bit systems, the number of compiler threads is limited to 3.
  // On these systems, the virtual address space available to the JVM
  // is usually limited to 2-4 GB (the exact value depends on the platform).
  // As the compilers (especially C2) can consume a large amount of
  // memory, scaling the number of compiler threads with the number of
  // available cores can result in the exhaustion of the address space
  // available to the VM and thus cause the VM to crash.
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    count = 3;
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#endif

  if (TieredStopAtLevel < CompLevel_full_optimization) {
    // No C2 compiler thread required
    set_c1_count(count);
  } else {
    set_c1_count(MAX2(count / 3, 1));
    set_c2_count(MAX2(count - c1_count(), 1));
  }
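  // For example (illustrative): with count = 7 and C2 enabled, c1_count = MAX2(7 / 3, 1) = 2
  // and c2_count = 7 - 2 = 5, so the assertion below holds.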
  assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip the update if counters are absent.
  // Can't allocate them since we are holding the compile queue lock.
  if (m->method_counters() == NULL)  return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should have been running for at least TieredRateUpdateMinTime (1ms by default)
  // since the last safepoint.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And the previous measurement must have been taken at least TieredRateUpdateMinTime ago.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
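      // For example (illustrative): 300 new invocation + backedge events observed over a
      // 60 ms interval give a rate of 300 / 60 = 5 events per millisecond.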
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for TieredRateUpdateMaxTime (25ms by default), zero the rate.
        // Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

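// The weight is used to order methods in the compile queue: methods that are both hot
// (high rate) and heavily executed (high invocation/backedge counts) sort first.
// For example (illustrative): rate = 2 events/ms, 1000 invocations and 500 backedges
// give a weight of (2 + 1) * 1001 * 501 = 1504503.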
double AdvancedThresholdPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) *
    (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
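// For example, a task whose method has already been compiled at level 4 (i.e. a recompilation
// after deoptimization) is preferred over a first-time level 3 task; among tasks at the same
// level, the one with the larger weight() wins.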
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else
    if (x->highest_comp_level() == y->highest_comp_level()) {
      if (weight(x) > weight(y)) {
        return true;
      }
    }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
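    // With a scale of 1 this asks, roughly, whether the profile deltas accumulated so far
    // already satisfy the full-profile -> full-optimization (tier 4) thresholds.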
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_blocking_task = NULL;
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      // Blocking tasks and tasks submitted from the whitebox API don't become stale.
      if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }

    if (task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != NULL) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;
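  // Illustrative example (assuming the default Tier3LoadFeedback of 5): with 30 tasks queued
  // for 2 C1 compiler threads, k = 30 / (5 * 2) + 1 = 4, i.e. the corresponding thresholds
  // are effectively quadrupled under that load.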

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}
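
// For example: if threshold_scale() returns k = 4 for the tier 3 transition, the helpers above
// roughly require four times the usual Tier3 invocation/backedge thresholds to be reached before
// a level 0 or level 2 method is queued for a level 3 compilation.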

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
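// For example (assuming the default Tier0ProfilingStartPercentage of 200): the scale below
// becomes k = 200 / 100.0 = 2.0, i.e. an interpreted method starts profiling once it reaches
// roughly twice the usual tier 3 thresholds, provided the C2 queue is short enough.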
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
 *    without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
      default: break;
      case CompLevel_aot: {
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                               Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                               (this->*p)(i, b, cur_level, method))) {
        next_level = CompLevel_full_profile;
      }
    }
    break;
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (EnableJVMCI && UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
          // compilation method and all potential inlinees have mature profiles (which
          // includes type profiling). If it sees immature profiles, JVMCI's inliner
          // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to
          // exploring/inlining too many graphs). Since a rewrite of the inliner is
          // in progress, we simply disable the dialing back heuristic for now and will
          // revisit this decision once the new inliner is completed.
          next_level = CompLevel_full_profile;
        } else
#endif
        {
          // C1-generated fully profiled code is about 30% slower than the limited profile
          // code that has only invocation and backedge counters. The observation is that
          // if the C2 queue is large enough we can spend too much time in the fully profiled code
          // while waiting for C2 to pick the method from the queue. To alleviate this problem
          // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
          // we choose to compile a limited profiled version and then recompile with full profiling
          // when the load on C2 goes down.
          if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
              Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
            next_level = CompLevel_limited_profile;
          } else {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        } else {
          // If there is no MDO we need to profile
          if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                   Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                   (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCIRuntime::adjust_comp_level(method, false, next_level, thread);
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread * thread) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread);
  }
#endif
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
}

bool AdvancedThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
  if (UseAOT && !delay_compilation_during_startup()) {
    if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
      // If the current level is full profile or interpreter and we're switching to any other level,
      // re-activate the AOT code first so that we won't waste time over-profiling.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // Fall through for JIT compilation.
    }
    if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
      // If the next level is limited profile, use the AOT code (if there is any),
      // since it's essentially the same thing.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // No need to JIT, we're done.
      return true;
    }
  }
  return false;
}


// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                      CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  CompLevel next_level = call_event(mh(), level, thread);
  if (next_level != level) {
    if (maybe_switch_to_aot(mh, level, next_level, thread)) {
      // No JITting necessary
      return;
    }
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                       int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level, thread);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      if (level == CompLevel_aot) {
        // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
        if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
        }
      } else {
        // Current loop event level is not AOT
        guarantee(nm != NULL, "Should have nmethod here");
        cur_level = comp_level(mh());
        next_level = call_event(mh(), cur_level, thread);

        if (max_osr_level == CompLevel_full_optimization) {
          // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
          bool make_not_entrant = false;
          if (nm->is_osr_method()) {
            // This is an OSR method, just make it not entrant and recompile later if needed
            make_not_entrant = true;
          } else {
            if (next_level != CompLevel_full_optimization) {
              // next_level is not full opt, so we need to recompile the
              // enclosing method without the inlinee
              cur_level = CompLevel_none;
              make_not_entrant = true;
            }
          }
          if (make_not_entrant) {
            if (PrintTieredEvents) {
              int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
              print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
            }
            nm->make_not_entrant();
          }
        }
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
            compile(mh, InvocationEntryBci, next_level, thread);
          }
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level, thread);
      if (next_level != cur_level) {
        if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    }
  }
}

#endif // TIERED