--- old/src/hotspot/share/interpreter/invocationCounter.cpp	2019-01-30 13:21:42.663863252 +0800
+++ new/src/hotspot/share/interpreter/invocationCounter.cpp	2019-01-30 13:21:41.814063017 +0800
@@ -79,6 +79,10 @@
 int                       InvocationCounter::_init  [InvocationCounter::number_of_states];
 InvocationCounter::Action InvocationCounter::_action[InvocationCounter::number_of_states];
 
+#ifdef CC_INTERP
+int InvocationCounter::InterpreterInvocationLimit;
+int InvocationCounter::InterpreterBackwardBranchLimit;
+#endif
 
 const char* InvocationCounter::state_as_string(State state) {
   switch (state) {
@@ -132,6 +136,20 @@
   guarantee((int)number_of_states <= (int)state_limit, "adjust number_of_state_bits");
   def(wait_for_nothing, 0, do_nothing);
   def(wait_for_compile, 0, do_decay);
+
+#ifdef CC_INTERP
+  InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits;
+
+  // When methodData is collected, the backward branch limit is compared against a
+  // methodData counter, rather than an InvocationCounter. In the former case, we
+  // don't need the shift by number_of_noncount_bits, but we do need to adjust
+  // the factor by which we scale the threshold.
+  if (ProfileInterpreter) {
+    InterpreterBackwardBranchLimit = (int)((int64_t)CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100);
+  } else {
+    InterpreterBackwardBranchLimit = (int)(((int64_t)CompileThreshold * OnStackReplacePercentage / 100) << number_of_noncount_bits);
+  }
+#endif
 }
 
 void invocationCounter_init() {
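
For reference, below is a small standalone sketch (not part of the patch) of the arithmetic the new CC_INTERP block performs. The flag values and the value of number_of_noncount_bits are assumptions chosen to resemble common HotSpot defaults, purely to illustrate how the three limits relate:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed values for illustration only; in the VM these come from the
  // CompileThreshold / OnStackReplacePercentage / InterpreterProfilePercentage
  // flags and from InvocationCounter's private bit-layout constants.
  const int64_t CompileThreshold             = 10000;
  const int64_t OnStackReplacePercentage     = 140;
  const int64_t InterpreterProfilePercentage = 33;
  const int     number_of_noncount_bits      = 3;

  // The invocation limit is compared against the raw counter word, whose low
  // bits hold non-count state, hence the left shift.
  int invocation_limit = (int)(CompileThreshold << number_of_noncount_bits);                      // 80000

  // With profiling, the backedge limit is compared against a methodData counter,
  // so there is no shift, but the OSR percentage is reduced by the profile percentage.
  int backedge_limit_profiled =
      (int)(CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100);  // 10700

  // Without profiling, the limit is compared against an InvocationCounter, so it
  // is scaled by the OSR percentage and then shifted like the invocation limit.
  int backedge_limit_unprofiled =
      (int)((CompileThreshold * OnStackReplacePercentage / 100) << number_of_noncount_bits);      // 112000

  printf("%d %d %d\n", invocation_limit, backedge_limit_profiled, backedge_limit_unprofiled);
  return 0;
}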