src/share/vm/runtime/advancedThresholdPolicy.cpp
*** old/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Nov 3 14:16:49 2016
--- new/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Nov 3 14:16:49 2016
*** 22,32 ****
--- 22,31 ----
*
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif
*** 204,214 ****
--- 203,212 ----
// Blocking tasks and tasks submitted from the whitebox API don't become stale
if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
if (PrintTieredEvents) {
print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
}
task->log_task_dequeued("stale");
compile_queue->remove_and_mark_stale(task);
method->clear_queued_for_compilation();
task = next_task;
continue;
}
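[Reviewer note, not part of the patch] The staleness test above is inherited from the base policy. A minimal sketch of the idea, simplified from SimpleThresholdPolicy::is_stale() (the real check also guards against time spent in recent safepoints, so treat the details below as an assumption):

    // Sketch: a queued task is stale when its method has produced no new
    // invocation or back-branch events within TieredCompileTaskTimeout ms,
    // i.e. it went cold while waiting for a compiler thread.
    bool is_stale_sketch(jlong now, jlong timeout, Method* m) {
      jlong delta_t = now - m->prev_time();               // ms since the last rate update
      if (delta_t > timeout) {
        int event_count = m->invocation_count() + m->backedge_count();
        return event_count - m->prev_event_count() == 0;  // no new events: stale
      }
      return false;
    }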
*** 274,283 ****
--- 272,285 ----
// Tier?LoadFeedback is basically a coefficient that determines
// how many methods per compiler thread can be in the queue before
// the threshold values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
+ case CompLevel_aot: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
+ }
case CompLevel_none:
case CompLevel_limited_profile: {
double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
return loop_predicate_helper<CompLevel_none>(i, b, k, method);
}
*** 290,299 ****
--- 292,305 ----
}
}
bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
+ case CompLevel_aot: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return call_predicate_helper<CompLevel_aot>(i, b, k, method);
+ }
case CompLevel_none:
case CompLevel_limited_profile: {
double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
return call_predicate_helper<CompLevel_none>(i, b, k, method);
}
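[Reviewer note, not part of the patch] Both new CompLevel_aot cases deliberately reuse the tier-3 scaling, threshold_scale(CompLevel_full_profile, Tier3LoadFeedback): a method running AOT code is next promoted into the C1 full-profile queue, so it is that queue's load that should stretch its thresholds. A sketch of the coefficient described by the comment above (simplified; the real threshold_scale() also folds in the per-tier threshold scaling flags):

    // Sketch: k grows linearly with queue length. Once the queue holds
    // feedback_k methods per compiler thread, k == 2, i.e. the effective
    // compile thresholds have doubled.
    double threshold_scale_sketch(int queue_size, int compiler_count, int feedback_k) {
      return (double)queue_size / (feedback_k * compiler_count) + 1.0;
    }
    // A predicate helper then compares a counter against its scaled
    // threshold, e.g. for back branches at tier 3:
    //   b >= Tier3BackEdgeThreshold * k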
*** 392,414 ****
--- 398,437 ----
if (is_trivial(method)) {
next_level = CompLevel_simple;
} else {
switch(cur_level) {
+ case CompLevel_aot: {
+ // If we were at full profile level, would we switch to full opt?
+ if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
+ next_level = CompLevel_full_optimization;
+ } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+ (this->*p)(i, b, cur_level, method))) {
+ next_level = CompLevel_full_profile;
+ }
+ }
+ break;
case CompLevel_none:
// If we were at full profile level, would we switch to full opt?
if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization;
} else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
! if (EnableJVMCI && UseJVMCICompiler) {
// Since JVMCI takes a while to warm up, its queue inevitably backs up during
! // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
+ // compilation method and all potential inlinees have mature profiles (which
+ // includes type profiling). If it sees immature profiles, JVMCI's inliner
+ // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to
+ // exploring/inlining too many graphs). Since a rewrite of the inliner is
+ // in progress, we simply disable the dialing back heuristic for now and will
+ // revisit this decision once the new inliner is completed.
next_level = CompLevel_full_profile;
- break;
- }
+ } else
#endif
+ {
// C1-generated fully profiled code is about 30% slower than the limited profile
// code that has only invocation and backedge counters. The observation is that
// if C2 queue is large enough we can spend too much time in the fully profiled code
// while waiting for C2 to pick the method from the queue. To alleviate this problem
// we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
*** 419,428 ****
--- 442,452 ----
next_level = CompLevel_limited_profile;
} else {
next_level = CompLevel_full_profile;
}
}
+ }
break;
case CompLevel_limited_profile:
if (is_method_profiled(method)) {
// Special case: we got here because this method was fully profiled in the interpreter.
next_level = CompLevel_full_optimization;
*** 436,445 ****
--- 460,476 ----
next_level = CompLevel_full_profile;
}
} else {
next_level = CompLevel_full_optimization;
}
+ } else {
+ // If there is no MDO we need to profile
+ if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+ (this->*p)(i, b, cur_level, method))) {
+ next_level = CompLevel_full_profile;
+ }
}
}
break;
case CompLevel_full_profile:
{
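[Reviewer note, not part of the patch] The dial-back heuristic described in the CompLevel_none case above ("C1-generated fully profiled code is about 30% slower") restated as a sketch, with the queue metrics passed in explicitly (in the policy they come from CompileBroker::queue_size() and compiler_count()):

    // Sketch: while the C2 queue is long, take the cheaper limited-profile
    // code instead of the slower fully profiled C1 code.
    CompLevel pick_profile_level_sketch(int c2_queue_size, int c2_compiler_count) {
      if (c2_queue_size > Tier3DelayOn * c2_compiler_count) {
        return CompLevel_limited_profile;  // C2 backlog: profile lightly
      }
      return CompLevel_full_profile;
    }

Note the hysteresis: Tier3DelayOn turns the dial-back on, while the new CompLevel_aot case waits for the queue to drain below the lower Tier3DelayOff watermark before promoting an AOT method to full profiling.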
*** 512,530 ****
--- 543,585 ----
int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
update_rate(os::javaTimeMillis(), mh());
CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
}
+ bool AdvancedThresholdPolicy::maybe_switch_to_aot(methodHandle mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
+ if (UseAOT && !delay_compilation_during_startup()) {
+ if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
+ // If the current level is full profile or interpreter and we're switching to any other level,
+ // re-activate the AOT code first so that we won't waste time overprofiling.
+ compile(mh, InvocationEntryBci, CompLevel_aot, thread);
+ // Fall through for JIT compilation.
+ }
+ if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
+ // If the next level is limited profile, use the aot code (if there is any),
+ // since it's essentially the same thing.
+ compile(mh, InvocationEntryBci, CompLevel_aot, thread);
+ // No need to JIT, we're done.
+ return true;
+ }
+ }
+ return false;
+ }
+
+
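[Reviewer note, not part of the patch] How the two branches of maybe_switch_to_aot() combine for a method that has AOT code:

    // cur_level               proposed next_level          result
    // ----------------------  ---------------------------  ------------------------------------------
    // CompLevel_none          CompLevel_limited_profile    activate AOT code; return true (no JIT)
    // CompLevel_full_profile  CompLevel_full_optimization  activate AOT code; return false (JIT at C2)
    // CompLevel_aot           CompLevel_limited_profile    nothing to switch; return false (JIT at C1)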
// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, CompiledMethod* nm, JavaThread* thread) {
if (should_create_mdo(mh(), level)) {
create_mdo(mh, thread);
}
- if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
CompLevel next_level = call_event(mh(), level, thread);
if (next_level != level) {
+ if (maybe_switch_to_aot(mh, level, next_level, thread)) {
+ // No JITting necessary
+ return;
+ }
+ if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, thread);
}
}
}
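[Reviewer note, not part of the patch] Note the restructuring: the is_compilation_enabled()/in-queue guard that used to wrap the whole event now sits after the maybe_switch_to_aot() call, so an AOT activation can satisfy the level change without any JIT-queue interaction. The new-side control flow, flattened out of the merged diff above:

    // CompLevel next_level = call_event(mh(), level, thread);
    // if (next_level != level) {
    //   if (maybe_switch_to_aot(mh, level, next_level, thread)) {
    //     return;                                  // AOT code is enough; no JIT
    //   }
    //   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    //     compile(mh, InvocationEntryBci, next_level, thread);
    //   }
    // }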
*** 550,559 ****
--- 605,621 ----
// Use loop event as an opportunity to also check if there's been
// enough calls.
CompLevel cur_level, next_level;
if (mh() != imh()) { // If there is an enclosing method
+ if (level == CompLevel_aot) {
+ // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
+ if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
+ }
+ } else {
+ // Current loop event level is not AOT
guarantee(nm != NULL, "Should have nmethod here");
cur_level = comp_level(mh());
next_level = call_event(mh(), cur_level, thread);
if (max_osr_level == CompLevel_full_optimization) {
*** 576,599 ****
--- 638,664 ----
print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
}
nm->make_not_entrant();
}
}
- if (!CompileBroker::compilation_is_in_queue(mh)) {
// Fix up next_level if necessary to avoid deopts
if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
next_level = CompLevel_full_profile;
}
if (cur_level != next_level) {
+ if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, thread);
}
}
+ }
} else {
- cur_level = comp_level(imh());
- next_level = call_event(imh(), cur_level, thread);
- if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
- compile(imh, InvocationEntryBci, next_level, thread);
+ cur_level = comp_level(mh());
+ next_level = call_event(mh(), cur_level, thread);
+ if (next_level != cur_level) {
+ if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, next_level, thread);
+ }
}
}
}
}
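[Reviewer note, not part of the patch] A hypothetical back-branch sequence through the new CompLevel_aot path above, for a hot loop in inlinee imh inside an AOT-compiled enclosing method mh:

    // 1. The loop event may queue an OSR version of imh at whatever level
    //    the policy picks (the usual OSR handling, unchanged by this hunk).
    // 2. Because level == CompLevel_aot, and provided an OSR version exists
    //    (max_osr_level != CompLevel_none) and mh is not already queued,
    //    mh itself is queued at MIN2((CompLevel)TieredStopAtLevel,
    //    CompLevel_full_profile), so future invocations enter JIT-compiled
    //    code instead of OSR-ing out of the AOT code on every loop entry
    //    (the "infinite OSRs" the comment warns about).
    // 3. Until that compilation finishes, mh keeps executing its AOT code
    //    ("Stay at AOT level while it's compiling").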