/*
 * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/tieredThresholdPolicy.hpp"
#include "code/scopeDesc.hpp"
#include "oops/method.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#ifdef TIERED

#include "c1/c1_Compiler.hpp"
#include "opto/c2compiler.hpp"

template<CompLevel level>
bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(level) {
  case CompLevel_aot:
    return (i >= Tier3AOTInvocationThreshold * scale) ||
           (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
  case CompLevel_none:
  case CompLevel_limited_profile:
    return (i >= Tier3InvocationThreshold * scale) ||
           (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
  case CompLevel_full_profile:
    return (i >= Tier4InvocationThreshold * scale) ||
           (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
  }
  return true;
}

template<CompLevel level>
bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(level) {
  case CompLevel_aot:
    return b >= Tier3AOTBackEdgeThreshold * scale;
  case CompLevel_none:
  case CompLevel_limited_profile:
    return b >= Tier3BackEdgeThreshold * scale;
  case CompLevel_full_profile:
    return b >= Tier4BackEdgeThreshold * scale;
  }
  return true;
}
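
// A rough worked example of the call predicate above (informal, assuming the
// commonly cited defaults Tier3InvocationThreshold=200,
// Tier3MinInvocationThreshold=100 and Tier3CompileThreshold=2000; consult the
// flag declarations for the authoritative values on a given build): with
// scale == 1, an interpreted method invoked 150 times with 1900 taken back
// edges satisfies i >= 100 && i + b >= 2000 (150 + 1900 == 2050), so the
// CompLevel_none case returns true and a level 3 (full profile) compilation
// can be requested. A per-method or global CompileThresholdScaling value
// simply multiplies 'scale' and shifts these cut-offs proportionally.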

// Simple methods perform just as well compiled with C1 as with C2.
// Determine if a given method is such a case.
bool TieredThresholdPolicy::is_trivial(Method* method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

CompLevel TieredThresholdPolicy::comp_level(Method* method) {
  CompiledMethod *nm = method->code();
  if (nm != NULL && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) {
  int invocation_count = mh->invocation_count();
  int backedge_count = mh->backedge_count();
  MethodData* mdh = mh->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != NULL) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
             invocation_count, backedge_count, prefix,
             mdo_invocations, mdo_invocations_start,
             mdo_backedges, mdo_backedges_start);
  tty->print(" %smax levels=%d,%d", prefix,
             mh->highest_comp_level(), mh->highest_osr_comp_level());
}

// Print an event.
void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh,
                                        int bci, CompLevel level) {
  bool inlinee_event = mh() != imh();

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = mh->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = imh->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  }
  else tty->print("] ");
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
                                      CompileBroker::queue_size(CompLevel_full_optimization));

  print_specific(type, mh, imh, bci, level);

  if (type != COMPILE) {
    print_counters("", mh);
    if (inlinee_event) {
      print_counters("inlinee ", imh);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!mh->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!mh->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!mh->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2");
      need_comma = true;
    }
    if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2-osr");
    }
    tty->print(" status=");
    if (mh->queued_for_compilation()) {
      tty->print("in-queue");
    } else tty->print("idle");
  }
  tty->print_cr("]");
}
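
// A sketch of the ergonomic compiler-thread sizing performed in initialize()
// below, under the assumption of a 64-bit build with CICompilerCountPerCPU in
// effect and 16 active processors: log_cpu = log2(16) = 4, loglog_cpu =
// log2(4) = 2, so count = MAX2(4 * 2 * 3 / 2, 2) = 12, which is then split
// roughly 1:2 between C1 and C2 (c1_count = 4, c2_count = 8) unless the code
// cache cannot hold that many compiler buffers, in which case the count is
// lowered. The exact numbers depend on the platform and on flags such as
// ReservedCodeCacheSize.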

void TieredThresholdPolicy::initialize() {
  int count = CICompilerCount;
  bool c1_only = TieredStopAtLevel < CompLevel_full_optimization;
#ifdef _LP64
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_int(os::active_processor_count());
    int loglog_cpu = log2_int(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
    // Make sure there is enough space in the code cache to hold all the compiler buffers
    size_t c1_size = Compiler::code_buffer_size();
    size_t c2_size = C2Compiler::initial_code_buffer_size();
    size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
    int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
    if (count > max_count) {
      // Lower the compiler count such that all buffers fit into the code cache
      count = MAX2(max_count, c1_only ? 1 : 2);
    }
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#else
  // On 32-bit systems, the number of compiler threads is limited to 3.
  // On these systems, the virtual address space available to the JVM
  // is usually limited to 2-4 GB (the exact value depends on the platform).
  // As the compilers (especially C2) can consume a large amount of
  // memory, scaling the number of compiler threads with the number of
  // available cores can result in the exhaustion of the address space
  // available to the VM and thus cause the VM to crash.
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    count = 3;
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#endif

  if (c1_only) {
    // No C2 compiler thread required
    set_c1_count(count);
  } else {
    set_c1_count(MAX2(count / 3, 1));
    set_c2_count(MAX2(count - c1_count(), 1));
  }
  assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
  if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
    counter->set_carry_flag();
  }
}

// Set carry flags on the counters if necessary
void TieredThresholdPolicy::handle_counter_overflow(Method* method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != NULL) {
    set_carry_if_necessary(mcs->invocation_counter());
    set_carry_if_necessary(mcs->backedge_counter());
  }
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    set_carry_if_necessary(mdo->invocation_counter());
    set_carry_if_necessary(mdo->backedge_counter());
  }
}
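
// Task selection notes (an informal summary, not additional policy):
// select_task() below scans the whole queue, drops tasks whose methods were
// unloaded or have gone stale, refreshes each method's invocation rate, and
// then picks the task whose method currently looks hottest according to
// compare_methods()/weight() further down in this file, with blocking tasks
// taking precedence over non-blocking ones.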

// Called with the queue locked and with at least one element
CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_blocking_task = NULL;
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from whitebox API don't become stale
    if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) {
      if (!task->is_unloaded()) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
        }
        method->clear_queued_for_compilation();
      }
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, method);
    if (max_task == NULL || compare_methods(method, max_method)) {
      // Select a method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != NULL) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
      TieredStopAtLevel > CompLevel_full_profile &&
      max_method != NULL && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      methodHandle mh(sd->method());
      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != NULL) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}

nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                      int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      thread->is_interp_only_mode()) {
    return NULL;
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    return NULL;
  }

  handle_counter_overflow(method());
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee());
  }

  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, thread);
  } else {
    // method == inlinee if the event originated in the main method
    method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
    // Check if event led to a higher level OSR compilation
    nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
    if (osr_nm != NULL && osr_nm->comp_level() > comp_level) {
      // Perform OSR with new nmethod
      return osr_nm;
    }
  }
  return NULL;
}

// Check if the method can be compiled, change level if necessary
void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  assert(level <= TieredStopAtLevel, "Invalid compilation level");
  if (level == CompLevel_none) {
    return;
  }
  if (level == CompLevel_aot) {
    if (mh->has_aot_code()) {
      if (PrintTieredEvents) {
        print_event(COMPILE, mh, mh, bci, level);
      }
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
        mh->aot_code()->make_entrant();
        if (mh->has_compiled_code()) {
          mh->code()->make_not_entrant();
        }
        Method::set_code(mh, mh->aot_code());
      }
    }
    return;
  }

  // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
  // in the interpreter and then compile with C2 (the transition function will request that,
  // see common()). If the method cannot be compiled with C2 but still can with C1, compile it with
  // pure C1.
  if (!can_be_compiled(mh, level)) {
    if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
      compile(mh, bci, CompLevel_simple, thread);
    }
    return;
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh, mh, bci, level);
    }
    submit_compile(mh, bci, level, thread);
  }
}

// Update the rate and submit compile
void TieredThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
}

// Print an event.
void TieredThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh,
                                           int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

// update_rate() is called from select_task() while holding a compile queue lock.
void TieredThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (m->method_counters() == NULL) return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool TieredThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

double TieredThresholdPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) *
         (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool TieredThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}

double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
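  // For a rough sense of scale (assuming Tier3LoadFeedback=5, which matches
  // the commonly cited default): 20 queued level 3 requests served by 2 C1
  // threads give k = 20 / (5 * 2) + 1 = 3, i.e. the Tier3 thresholds are
  // effectively tripled while the queue stays that long. The code below
  // additionally scales k exponentially once code cache occupancy passes the
  // configured IncreaseFirstTierCompileThresholdAt ratio.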
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how
// many methods per compiler thread can be in the queue before
// the threshold values double.
bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_aot: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_aot>(i, b, k, method);
  }
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

// Determine if a method is mature.
bool TieredThresholdPolicy::is_mature(Method* method) {
  if (is_trivial(method)) return true;
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
           loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  return false;
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void TieredThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
 *    without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization different loops
 * are possible.
 *
 */
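
/*
 * A concrete (informal) illustration of path (a) above, assuming the commonly
 * cited default thresholds (e.g. Tier3InvocationThreshold=200 and
 * Tier4InvocationThreshold=5000; consult the flag declarations for the
 * authoritative values): a hot method starts interpreted at level 0, is queued
 * for a level 3 (C1, full profiling) compile once its interpreter counters
 * satisfy the Tier3 call or loop predicate, keeps running and profiling in the
 * level 3 code, and is finally queued for level 4 (C2) once the MDO counter
 * deltas satisfy the Tier4 predicate. Queue-length feedback (threshold_scale)
 * can delay either step, which is how patterns (b) and (c) arise.
 */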

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    default: break;
    case CompLevel_aot: {
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                      Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                      (this->*p)(i, b, cur_level, method))) {
        next_level = CompLevel_full_profile;
      }
    }
    break;
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (EnableJVMCI && UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
          // compilation method and all potential inlinees have mature profiles (which
          // includes type profiling). If it sees immature profiles, JVMCI's inliner
          // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to
          // exploring/inlining too many graphs). Since a rewrite of the inliner is
          // in progress, we simply disable the dialing back heuristic for now and will
          // revisit this decision once the new inliner is completed.
          next_level = CompLevel_full_profile;
        } else
#endif
        {
          // C1-generated fully profiled code is about 30% slower than the limited profile
          // code that has only invocation and backedge counters. The observation is that
          // if the C2 queue is large enough we can spend too much time in the fully profiled code
          // while waiting for C2 to pick the method from the queue. To alleviate this problem
          // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
          // we choose to compile a limited profiled version and then recompile with full profiling
          // when the load on C2 goes down.
          if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
              Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
            next_level = CompLevel_limited_profile;
          } else {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        } else {
          // If there is no MDO we need to profile
          if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                   Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                   (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_full_profile;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCI::adjust_comp_level(method, false, next_level, thread);
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) {
  CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCI::adjust_comp_level(method, true, next_level, thread);
  }
#endif
  return next_level;
}

bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
  if (UseAOT) {
    if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
      // If the current level is full profile or interpreter and we're switching to any other level,
      // activate the AOT code back first so that we won't waste time overprofiling.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // Fall through for JIT compilation.
    }
    if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
      // If the next level is limited profile, use the aot code (if there is any),
      // since it's essentially the same thing.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // No need to JIT, we're done.
      return true;
    }
  }
  return false;
}


// Handle the invocation event.
void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                    CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  CompLevel next_level = call_event(mh(), level, thread);
  if (next_level != level) {
    if (maybe_switch_to_aot(mh, level, next_level, thread)) {
      // No JITting necessary
      return;
    }
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                     int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level, thread);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use loop event as an opportunity to also check if there's been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      if (level == CompLevel_aot) {
        // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
        if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
        }
      } else {
        // Current loop event level is not AOT
        guarantee(nm != NULL, "Should have nmethod here");
        cur_level = comp_level(mh());
        next_level = call_event(mh(), cur_level, thread);

        if (max_osr_level == CompLevel_full_optimization) {
          // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
          bool make_not_entrant = false;
          if (nm->is_osr_method()) {
            // This is an osr method, just make it not entrant and recompile later if needed
            make_not_entrant = true;
          } else {
            if (next_level != CompLevel_full_optimization) {
              // next_level is not full opt, so we need to recompile the
              // enclosing method without the inlinee
              cur_level = CompLevel_none;
              make_not_entrant = true;
            }
          }
          if (make_not_entrant) {
            if (PrintTieredEvents) {
              int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
              print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
            }
            nm->make_not_entrant();
          }
        }
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
            compile(mh, InvocationEntryBci, next_level, thread);
          }
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level, thread);
      if (next_level != cur_level) {
        if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    }
  }
}

#endif