/*
 * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
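    // For example, 16 active processors give log_cpu = 4 and loglog_cpu = 2,
    // so count = 4 * 2 * 3 / 2 = 12 compiler threads in total, split roughly
    // 1:2 between C1 and C2 below.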
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (m->method_counters() == NULL)  return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

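// The weight orders methods in the compile queue (see compare_methods() and
// select_task()): hotter methods, i.e. those with a higher event rate and higher
// absolute invocation/backedge counts, are compiled first. The +1 terms keep the
// weight non-zero for methods whose rate or counters happen to be zero.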
double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) *  (method->backedge_count() + 1));
}

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is the method profiled enough? True if the MDO counter deltas already satisfy
// the predicate for a transition to full optimization.
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find the method with the highest priority (see compare_methods()).
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select the method with the higher priority.
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

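  // If the selected task is a level 3 (full profile) compilation but the method has
  // already gathered enough profile data while waiting in the queue, downgrade it to
  // level 2 so the generated code runs faster while C2 catches up; this is the
  // 0 -> (3->2) -> 4 pattern described in the state diagram further down.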
  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

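// Compute a scaling coefficient for the compilation thresholds from the current
// per-compiler queue load: k = queue_size / (feedback_k * compiler_count) + 1.
// An empty queue gives k == 1; for example, with feedback_k == 5, 2 compiler
// threads and 10 queued methods, k == 10 / (5 * 2) + 1 == 2, i.e. the thresholds double.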
double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines
// how many methods per compiler thread can be in the queue before
// the threshold values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for profiling to fully occur at level 0 in the meantime. We then change the compilation level
 *    of the method to 2 while the request is still in the queue, because the level 2 code runs much
 *    faster without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and compiled at
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can be with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
                                 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use loop event as an opportunity to also check if there's been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an osr method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED