/*
 * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

}

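// Set up the number of compiler threads and other tiered-compilation ergonomics.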
void AdvancedThresholdPolicy::initialize() {
  int count = CICompilerCount;
#ifdef _LP64
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  if (CICompilerCountPerCPU) {
    // Simple log n grows too slowly for tiered compilation; try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
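    // e.g., 64 active processors: log_cpu = 6, loglog_cpu = 2, count = MAX2(6 * 2 * 3 / 2, 2) = 18.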
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#else
  // On 32-bit systems, the number of compiler threads is limited to 3.
  // On these systems, the virtual address space available to the JVM
  // is usually limited to 2-4 GB (the exact value depends on the platform).
  // As the compilers (especially C2) can consume a large amount of
  // memory, scaling the number of compiler threads with the number of
  // available cores can result in the exhaustion of the address space
  // available to the VM and thus cause the VM to crash.
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    count = 3;
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#endif

  if (TieredStopAtLevel < CompLevel_full_optimization) {
    // No C2 compiler thread required
    set_c1_count(count);
  } else {
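    // Split the threads between C1 and C2, roughly 1:2, keeping at least one of each.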
    set_c1_count(MAX2(count / 3, 1));
    set_c2_count(MAX2(count - c1_count(), 1));
  }
  assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (m->method_counters() == NULL)  return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
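      // e.g., 300 new events over a 200 ms interval give a rate of 1.5 events/ms.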
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

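// Queue ordering heuristic: a method's weight grows with its current event rate
// and its accumulated invocation and backedge counts, so hotter methods are
// picked from the compile queue first (see compare_methods() and select_task()).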
double AdvancedThresholdPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) *
    (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
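    // Compare the profile deltas against the unscaled Tier4 thresholds (scale = 1).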
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_blocking_task = NULL;
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      // Blocking tasks and tasks submitted via the WhiteBox API don't become stale.
      if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }

    if (task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != NULL) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

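// Compute a multiplier for the tier-up thresholds of the given level based on the
// length of that level's compile queue relative to the number of its compiler threads.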
double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;
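  // e.g., a queue of 20 methods with feedback_k = 5 and 2 compiler threads gives
  // k = 20 / (5 * 2) + 1 = 3, i.e. the thresholds triple under this load.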

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines
// how many methods per compiler thread can be in the queue before
// the threshold values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to complete at level 0 in the meantime. We then change the compilation level
 *    of the still-queued request to 2, because that lets the method run much faster without full
 *    profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

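  // Trivial methods (tiny methods that would not benefit from profiling) are
  // compiled with plain C1 at level 1.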
  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    default: break;
    case CompLevel_aot: {
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                      Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                      (this->*p)(i, b, cur_level, method))) {
        next_level = CompLevel_full_profile;
      }
    }
    break;
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (EnableJVMCI && UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
          // compilation method and all potential inlinees have mature profiles (which
          // includes type profiling). If it sees immature profiles, JVMCI's inliner
          // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to
          // exploring/inlining too many graphs). Since a rewrite of the inliner is
          // in progress, we simply disable the dialing back heuristic for now and will
          // revisit this decision once the new inliner is completed.
          next_level = CompLevel_full_profile;
        } else
#endif
        {
          // C1-generated fully profiled code is about 30% slower than the limited profile
          // code that has only invocation and backedge counters. The observation is that
          // if the C2 queue is large enough we can spend too much time in the fully profiled code
          // while waiting for C2 to pick the method from the queue. To alleviate this problem
          // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
          // we choose to compile a limited profiled version and then recompile with full profiling
          // when the load on C2 goes down.
          if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
              Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
            next_level = CompLevel_limited_profile;
          } else {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        } else {
          // If there is no MDO we need to profile
          if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                   Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                   (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCIRuntime::adjust_comp_level(method, false, next_level, thread);
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, it means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread);
  }
#endif
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
}

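// Decide whether AOT code should be activated instead of, or in addition to, a
// JIT compilation. Returns true if the AOT code suffices and no JIT compilation
// needs to be submitted.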
bool AdvancedThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
  if (UseAOT && !delay_compilation_during_startup()) {
    if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
      // If the current level is full profile or interpreter and we're switching to any other level,
      // activate the AOT code first so that we don't waste time overprofiling.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // Fall through for JIT compilation.
    }
    if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
      // If the next level is limited profile, use the AOT code (if there is any),
      // since it's essentially the same thing.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // No need to JIT, we're done.
      return true;
    }
  }
  return false;
}


// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                      CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  CompLevel next_level = call_event(mh(), level, thread);
  if (next_level != level) {
    if (maybe_switch_to_aot(mh, level, next_level, thread)) {
      // No JITting necessary
      return;
    }
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                       int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level, thread);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check whether there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      if (level == CompLevel_aot) {
        // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
        if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
        }
      } else {
        // Current loop event level is not AOT
        guarantee(nm != NULL, "Should have nmethod here");
        cur_level = comp_level(mh());
        next_level = call_event(mh(), cur_level, thread);

        if (max_osr_level == CompLevel_full_optimization) {
          // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
          bool make_not_entrant = false;
          if (nm->is_osr_method()) {
            // This is an OSR method; just make it not entrant and recompile later if needed
            make_not_entrant = true;
          } else {
            if (next_level != CompLevel_full_optimization) {
              // next_level is not full opt, so we need to recompile the
              // enclosing method without the inlinee
              cur_level = CompLevel_none;
              make_not_entrant = true;
            }
          }
          if (make_not_entrant) {
            if (PrintTieredEvents) {
              int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
              print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
            }
            nm->make_not_entrant();
          }
        }
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
            compile(mh, InvocationEntryBci, next_level, thread);
          }
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level, thread);
      if (next_level != cur_level) {
        if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    }
  }
}

#endif // TIERED