src/share/vm/runtime/advancedThresholdPolicy.cpp
      if (weight(x) > weight(y)) {
        return true;
      }
    }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}
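
// For illustration: with scale == 1 and ignoring per-method threshold scaling,
// the predicate above reduces to roughly the following shape (a sketch based on
// call_predicate_helper<CompLevel_full_profile>; the model function name is
// ours, not part of this file):
static bool profiled_enough_model(int i, int b) {
  return (i >= Tier4InvocationThreshold) ||
         (i >= Tier4MinInvocationThreshold && i + b >= Tier4CompileThreshold);
}
// That is, the counter deltas accumulated since the last profile snapshot must
// themselves satisfy the C2 (tier 4) thresholds before the profile is
// considered mature.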

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
#if INCLUDE_JVMCI
  CompileTask *max_non_jvmci_task = NULL;
#endif
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find the method with the maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        task->log_task_dequeued("stale");
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select the method with the higher rate.
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

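  // When JVMCI is the top-tier compiler, prefer a non-JVMCI candidate recorded
  // during the scan (if any) over the queue-wide maximum.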
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    if (max_non_jvmci_task != NULL) {
      max_task = max_non_jvmci_task;
      max_method = max_task->method();
    }
  }
#endif

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}
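
// The scan above uses the standard idiom for unlinking during a singly-linked
// list traversal: capture the successor before the current element can be
// removed. A self-contained toy version of the same pattern (all names here
// are illustrative, not part of HotSpot):
struct ToyTask { double rate; bool stale; ToyTask* next; };

static ToyTask* toy_select(ToyTask* head) {
  ToyTask* best = NULL;
  for (ToyTask* t = head; t != NULL;) {
    ToyTask* next = t->next;   // grab the successor first
    if (best == NULL) {
      best = t;                // the first element is the initial candidate
    } else if (t->stale) {
      // t would be unlinked and discarded here; 'next' remains valid
    } else if (t->rate > best->rate) {
      best = t;                // found a hotter method
    }
    t = next;                  // safe even if t was removed above
  }
  return best;
}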

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase the C1 compile threshold when the code cache is filled beyond
  // the percentage specified by IncreaseFirstTierCompileThresholdAt.
  // The main intention is to keep enough free space for C2-compiled code
  // to achieve peak performance if the code cache is under stress.
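
  // Worked example for the load factor k above (the numbers are hypothetical):
  // with 30 tasks queued at this level, feedback_k == 5 and two compiler
  // threads,
  //   k = 30 / (5 * 2) + 1 = 4.0
  // so a backed-up queue scales the effective compile thresholds up 4x, while
  // an empty queue leaves them unchanged (k == 1).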


 * Note that, since state 0 can be reached from any other state via deoptimization,
 * different loops are possible.
 *
 */

// Common transition function. Given a predicate, determines whether a method
// should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution.
          next_level = CompLevel_full_profile;
          break;
        }
#endif
        // C1-generated fully profiled code is about 30% slower than the limited-profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is long enough, we can spend too much time in the fully
        // profiled code while waiting for C2 to pick the method up from the queue.
        // To alleviate this problem we introduce a feedback on the C2 queue size:
        // if the C2 queue is sufficiently long, we compile a limited-profile version
        // first and recompile with full profiling when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
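
// The CompLevel_none arm above, reduced to its back-pressure rule (a sketch;
// the helper name and the example numbers below are ours):
static bool c2_backed_up_model(int c2_queue_size, int c2_threads) {
  return c2_queue_size > Tier3DelayOn * c2_threads;
}
// E.g. if Tier3DelayOn is 5 and two C2 compiler threads are running, the
// eleventh queued C2 task makes new tier-3 compiles start at limited_profile
// instead of full_profile.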

