/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/rframe.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/tieredThresholdPolicy.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"

#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif

CompilationPolicy* CompilationPolicy::_policy;

// Determine compilation policy based on command line argument
void compilationPolicy_init() {
  switch(CompilationPolicyChoice) {
  case 0:
    CompilationPolicy::set_policy(new SimpleCompPolicy());
    break;

  case 1:
#ifdef COMPILER2
    CompilationPolicy::set_policy(new StackWalkCompPolicy());
#else
    Unimplemented();
#endif
    break;
  case 2:
#ifdef TIERED
    CompilationPolicy::set_policy(new TieredThresholdPolicy());
#else
    Unimplemented();
#endif
    break;
  default:
    fatal("CompilationPolicyChoice must be in the range: [0-2]");
  }
  CompilationPolicy::policy()->initialize();
}

// Returns true if m must be compiled before executing it
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
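// The common triggers are -Xcomp (which clears UseInterpreter and thereby
// forces compilation of every method) and AlwaysCompileLoopMethods for
// methods that contain loops; see the return expression below.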
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                                                        // must compile all methods
         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}

void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
  if (must_be_compiled(selected_method)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.

    // Note: with several active threads, must_be_compiled may be true
    // while can_be_compiled is false; that is why the assert below stays removed.
    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
    if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
      // don't force compilation, resolve was on behalf of compiler
      return;
    }
    if (selected_method->method_holder()->is_not_initialized()) {
      // 'is_not_initialized' means not only '!is_initialized', but also that
      // initialization has not been started yet ('!being_initialized').
      // Do not force compilation of methods in uninitialized classes.
      // Note that doing this would throw an assert later,
      // in CompileBroker::compile_method.
      // We sometimes use the link resolver to do reflective lookups
      // even before classes are initialized.
      return;
    }
    CompileBroker::compile_method(selected_method, InvocationEntryBci,
                                  CompilationPolicy::policy()->initial_compile_level(),
                                  methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
  }
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled, as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version.  This can't happen in
  // production because the invocation counter can't be incremented,
  // but we shouldn't expose the system to this problem in testing
  // modes.
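  // The check below filters out methods that the interpreter handles with
  // such special (non-counting) entry points.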
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be compilable at any level for tiered
      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
    } else {
      // must be compilable at available level for non-tiered
      return !m->is_not_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be osr compilable at any level for tiered
      result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
    } else {
      // must be osr compilable at available level for non-tiered
      result = !m->is_not_osr_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return CompileBroker::should_compile_new_jobs();
}

CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
  if (TraceOnStackReplacement) {
    if (osr_nm == NULL) tty->print_cr("compilation failed");
    else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
  }
}
#endif // !PRODUCT

void NonTieredCompPolicy::initialize() {
  // Setup the compiler thread numbers
  if (CICompilerCountPerCPU) {
    // Example: if CICompilerCountPerCPU is true, then we get
    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
    // May help big-app startup time.
    _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1);
    // Make sure there is enough space in the code cache to hold all the compiler buffers
    size_t buffer_size = 1;
#ifdef COMPILER1
    buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size;
#endif
#ifdef COMPILER2
    buffer_size = is_server_compilation_mode_vm() ?
                    C2Compiler::initial_code_buffer_size() : buffer_size;
#endif
    int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
    if (_compiler_count > max_count) {
      // Lower the compiler count such that all buffers fit into the code cache
      _compiler_count = MAX2(max_count, 1);
    }
    FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
  } else {
    _compiler_count = CICompilerCount;
  }
}

// Note: this policy is used ONLY if TieredCompilation is off.
// compiler_count() behaves the following way:
// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
//   zero for the c1 compilation levels in server compilation mode runs
//   and c2 compilation levels in client compilation mode runs.
// - with COMPILER2 not defined it should return zero for c2 compilation levels.
// - with COMPILER1 not defined it should return zero for c1 compilation levels.
// - if neither is defined - always return zero.
int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
  assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
  if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
      is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
    return _compiler_count;
  }
  return 0;
}

void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
  // Make sure the invocation and backedge counters don't overflow again right away,
  // as would be the case for native methods.

  // BUT also make sure the method doesn't look like it was never executed.
  // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  mcs->invocation_counter()->set_carry();
  mcs->backedge_counter()->set_carry();

  assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
}

void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
  // Delay the next back-branch event, but pump up the invocation counter to trigger
  // whole-method compilation.
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  InvocationCounter* i = mcs->invocation_counter();
  InvocationCounter* b = mcs->backedge_counter();

  // Don't set the invocation counter's value too low, otherwise the method will
  // look immature (ic < ~5300), which prevents inlining based on type profiling.
  i->set(i->state(), CompileThreshold);
  // Don't reset the backedge counter too low - it is used to check if an OSR method is ready.
  b->set(b->state(), CompileThreshold / 2);
}

//
// CounterDecay
//
// Iterates through invocation counters and decrements them. This
// is done at each safepoint.
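// Only a slice of all loaded classes is processed on each decay tick; the slice
// is sized so that, at the minimum decay interval, every class is visited
// roughly once per CounterHalfLifeTime seconds (see classes_per_tick in decay()).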
//
class CounterDecay : public AllStatic {
  static jlong _last_timestamp;
  static void do_method(Method* m) {
    MethodCounters* mcs = m->method_counters();
    if (mcs != NULL) {
      mcs->invocation_counter()->decay();
    }
  }
public:
  static void decay();
  static bool is_decay_needed() {
    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
  }
};

jlong CounterDecay::_last_timestamp = 0;

void CounterDecay::decay() {
  _last_timestamp = os::javaTimeMillis();

  // This operation is going to be performed only at the end of a safepoint
  // and hence GCs will not be going on; all Java mutators are suspended
  // at this point, so the SystemDictionary_lock is not needed either.
  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
  size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
  size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
                                        CounterHalfLifeTime);
  for (size_t i = 0; i < classes_per_tick; i++) {
    InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
    if (k != NULL) {
      k->methods_do(do_method);
    }
  }
}

// Called at the end of the safepoint
void NonTieredCompPolicy::do_safepoint_work() {
  if (UseCounterDecay && CounterDecay::is_decay_needed()) {
    CounterDecay::decay();
  }
}

void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  ScopeDesc* sd = trap_scope;
  MethodCounters* mcs;
  InvocationCounter* c;
  for (; !sd->is_top(); sd = sd->sender()) {
    mcs = sd->method()->method_counters();
    if (mcs != NULL) {
      // Reset ICs of inlined methods, since they can trigger compilations also.
      mcs->invocation_counter()->reset();
    }
  }
  mcs = sd->method()->method_counters();
  if (mcs != NULL) {
    c = mcs->invocation_counter();
    if (is_osr) {
      // It was an OSR method, so bump the count higher.
      c->set(c->state(), CompileThreshold);
    } else {
      c->reset();
    }
    mcs->backedge_counter()->reset();
  }
}

// This method can be called by any component of the runtime to notify the policy
// that it's recommended to delay the compilation of this method.
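// Decaying both counters merely postpones the next overflow event; unlike
// disable_compilation() below, it does not keep the method from being
// compiled eventually.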
void NonTieredCompPolicy::delay_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->decay();
    mcs->backedge_counter()->decay();
  }
}

void NonTieredCompPolicy::disable_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
    mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
  }
}

CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
  return select_task_helper(compile_queue);
}

bool NonTieredCompPolicy::is_mature(Method* method) {
  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "Should be");
  uint current = mdo->mileage_of(method);
  uint initial = mdo->creation_mileage();
  if (current < initial)
    return true;  // some sort of overflow
  uint target;
  if (ProfileMaturityPercentage <= 0)
    target = (uint) -ProfileMaturityPercentage;  // absolute value
  else
    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
  return (current >= initial + target);
}

nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
                                    int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is
    // implemented partly by a check in the run_compiled_code
    // section of the interpreter whether we should skip running
    // compiled code, and partly by skipping OSR compiles for
    // interpreted-only threads.
    if (bci != InvocationEntryBci) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    if (bci == InvocationEntryBci) {
      reset_counter_for_invocation_event(method);
    } else {
      reset_counter_for_back_branch_event(method);
    }
    return NULL;
  }

  if (bci == InvocationEntryBci) {
    // When the code cache is full, compilation gets switched off and UseCompiler
    // is set to false.
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened. (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // Compilation at an invocation overflow no longer goes on to retry the test
    // for a compiled method; we always run the loser of the race as interpreted,
    // so return NULL.
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // When the code cache is full, we should not compile any more...
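    // (UseCompiler is cleared in that situation, so the check below also keeps
    // us from requesting new OSR compilations once the code cache fills up.)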
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, thread);
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
    return osr_nm;
  }
  return NULL;
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    if (bci == InvocationEntryBci) {
      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
    } else {
      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
    }
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          ProfileData *pd = mdo->bci_to_data(branch_bci);
          if (pd == NULL) {
            tty->print_cr("back branch count = N/A (missing ProfileData)");
          } else {
            tty->print_cr("back branch count = %d", pd->as_JumpData()->taken());
          }
        }
      }
    }
  }
}

void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
  if (TraceOnStackReplacement) {
    ResourceMark rm;
    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
    method->print_short_name(tty);
    tty->print_cr(" at bci %d", bci);
  }
}
#endif // !PRODUCT

// SimpleCompPolicy - compile current method

void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
    CompiledMethod* nm = m->code();
    if (nm == NULL) {
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}

// StackWalkCompPolicy - walk up stack to find a suitable method to compile

#ifdef COMPILER2
const char* StackWalkCompPolicy::_msg = NULL;


// Consider m for compilation
void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
    ResourceMark rm(thread);
    frame fr = thread->last_frame();
    assert(fr.is_interpreted_frame(), "must be interpreted");
    assert(fr.interpreter_frame_method() == m(), "bad method");
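
    // Rather than compiling the triggering method directly, walk up the
    // interpreted frames to find the top-most caller that is still worth
    // compiling (and into which this method would likely get inlined);
    // see findTopInlinableFrame() below.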
    RegisterMap reg_map(thread, false);
    javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
    // triggerVF is the frame that triggered its counter
    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());

    if (first->top_method()->code() != NULL) {
      // called obsolete method/nmethod -- no need to recompile
    } else {
      GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
      stack->push(first);
      RFrame* top = findTopInlinableFrame(stack);
      assert(top != NULL, "findTopInlinableFrame returned null");
      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
                                    m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}

RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
  // go up the stack until finding a frame that (probably) won't be inlined
  // into its caller
  RFrame* current = stack->at(0);  // current choice for stopping
  assert(current && !current->is_compiled(), "");
  const char* msg = NULL;

  while (1) {

    // before going up the stack further, check if doing so would get us into
    // compiled code
    RFrame* next = senderOf(current, stack);
    if (!next)                // No next frame up the stack?
      break;                  // Then compile with current frame

    Method* m = current->top_method();
    Method* next_m = next->top_method();

    if (!Inline) {            // Inlining turned off
      msg = "Inlining turned off";
      break;
    }
    if (next_m->is_not_compilable()) {  // Did we fail to compile this caller before?
      msg = "caller not compilable";
      break;
    }
    if (next->num() > MaxRecompilationSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: > MaxRecompilationSearchLength";
      break;
    }
    if (next->distance() > MaxInterpretedSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: next > MaxInterpretedSearchLength";
      break;
    }
    // Compiled frame above already decided not to inline;
    // do not recompile it.
    if (next->is_compiled()) {
      msg = "not going up into optimized code";
      break;
    }

    // Interpreted frame above us was already compiled. Do not force
    // a recompile, although if the frame above us runs long enough an
    // OSR might still happen.
    if (current->is_interpreted() && next_m->has_compiled_code()) {
      msg = "not going up -- already compiled caller";
      break;
    }

    // Compute how frequent this call site is. We have current method 'm'.
    // We know the next method 'next_m' is interpreted. Find the call site and
    // check the various invocation counts.
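    // Note that both counts depend on ProfileInterpreter: without interpreter
    // profiling, invcnt and cnt stay 0 and freq below degenerates to 0.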
    int invcnt = 0;  // Caller counts
    if (ProfileInterpreter) {
      invcnt = next_m->interpreter_invocation_count();
    }
    int cnt = 0;     // Call site counts
    if (ProfileInterpreter && next_m->method_data() != NULL) {
      ResourceMark rm;
      int bci = next->top_vframe()->bci();
      ProfileData* data = next_m->method_data()->bci_to_data(bci);
      if (data != NULL && data->is_CounterData())
        cnt = data->as_CounterData()->count();
    }

    // Call-site count relative to the caller invocation count; i.e. is this
    // call site a hot call site for method next_m?
    int freq = (invcnt) ? cnt/invcnt : cnt;

    // Check size and frequency limits
    if ((msg = shouldInline(m, freq, cnt)) != NULL) {
      break;
    }
    // Check inlining negative tests
    if ((msg = shouldNotInline(m)) != NULL) {
      break;
    }


    // If the caller method is too big or something then we do not want to
    // compile it just to inline a method
    if (!can_be_compiled(next_m, CompLevel_any)) {
      msg = "caller cannot be compiled";
      break;
    }

    if (next_m->name() == vmSymbols::class_initializer_name()) {
      msg = "do not compile class initializer (OSR ok)";
      break;
    }

    current = next;
  }

  assert(!current || !current->is_compiled(), "");

  return current;
}

RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
  RFrame* sender = rf->caller();
  if (sender && sender->num() == stack->length()) stack->push(sender);
  return sender;
}


const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
  // Allows targeted inlining
  // positive filter: should the callee be inlined? returns NULL (--> yes)
  // or rejection msg
  int max_size = MaxInlineSize;
  int cost = m->code_size();

  // Check for too many throws (and not too huge)
  if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize) {
    return NULL;
  }

  // bump the max size if the call is frequent
  if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
    if (TraceFrequencyInlining) {
      tty->print("(Inlined frequent method)\n");
      m->print();
    }
    max_size = FreqInlineSize;
  }
  if (cost > max_size) {
    return (_msg = "too big");
  }
  return NULL;
}


const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
  // negative filter: should the callee NOT be inlined?
  // returns NULL (--> inline) or rejection msg
  if (m->is_abstract()) return (_msg = "abstract method");
  // note: we allow ik->is_abstract()
  if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
  if (m->is_native()) return (_msg = "native method");
  CompiledMethod* m_code = m->code();
  if (m_code != NULL && m_code->code_size() > InlineSmallCode)
    return (_msg = "already compiled into a big method");

  // use frequency-based objections only for non-trivial methods
  if (m->code_size() <= MaxTrivialSize) return NULL;
  if (UseInterpreter) {     // don't use counts with -Xcomp
    if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
    if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
  }
  if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");

  return NULL;
}



#endif // COMPILER2