/*
 * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (m->method_counters() == NULL) return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
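  // Invocation and backedge counters don't advance while Java threads are stopped at a
  // safepoint, so a rate sampled right after one would under-estimate the true activity.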
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should have been running for at least 1ms since the last safepoint.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must have taken the previous sample at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for TieredRateUpdateMaxTime (25ms by default), zero the rate.
        // Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

double AdvancedThresholdPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) *
         (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
#if INCLUDE_JVMCI
  CompileTask *max_blocking_task = NULL;
#endif
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // Prefer 'blocking' compilations to minimize waits on them.
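      // (a compilation is treated as 'blocking' here when background compilation is disabled
      // for the method, e.g. via -Xbatch or a compiler directive, so a Java thread is waiting
      // for its result).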
      // This also helps to prevent blocking compiles from getting stale.
      CompLevel level = (CompLevel)task->comp_level();
      bool backgroundCompilation;
      DirectiveSet* directive =
        DirectivesStack::getMatchingDirective(methodHandle(method),
                                              CompileBroker::compiler(level));
      backgroundCompilation = directive->BackgroundCompilationOption;
      DirectivesStack::release(directive);
      if (backgroundCompilation == false) {
        max_task = task;
        max_method = method;
        break;
      }
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), level);
        }
        task->log_task_dequeued("stale");
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
#if INCLUDE_JVMCI
    if (UseJVMCICompiler && task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }
#endif
    task = next_task;
  }

#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    if (max_blocking_task != NULL) {
      // In blocking compilation mode, the CompileBroker will make
      // compilations submitted by a JVMCI compiler thread non-blocking. These
      // compilations should be scheduled after all blocking compilations
      // to service non-compiler related compilations sooner and reduce the
      // chance of such compilations timing out.
      max_task = max_blocking_task;
      max_method = max_task->method();
    }
  }
#endif

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
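// For example, with two C1 compiler threads and Tier3LoadFeedback == 5, a C1 queue of
// 10 outstanding requests yields k = 10 / (5 * 2) + 1 = 2 in threshold_scale(), i.e. the
// Tier3 thresholds double.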
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
 *    level 2 and wait until the load on C2 decreases.
 *    This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
 *    without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution.
          next_level = CompLevel_full_profile;
          break;
        }
#endif
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
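        // Its MDO counter deltas already satisfy the full-profile predicate (see
        // is_method_profiled()), so the profile is good enough to go straight to C2.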
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
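// Called when the method's invocation counter overflows in the interpreter or in
// level 2/3 compiled code; may create an MDO and/or request compilation at another level.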
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED