17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_METHODCOUNTERS_HPP
26 #define SHARE_VM_OOPS_METHODCOUNTERS_HPP
27
28 #include "oops/metadata.hpp"
29 #include "compiler/compilerOracle.hpp"
30 #include "interpreter/invocationCounter.hpp"
31 #include "runtime/arguments.hpp"
32
// MethodCounters: profiling / compilation-policy counters for a Method.
// A MetaspaceObj allocated separately from the Method it belongs to
// (back link in _method), holding invocation/backedge counters, nmethod
// aging state, and per-method compilation thresholds.
33 class MethodCounters: public MetaspaceObj {
34 friend class VMStructs;
35 friend class JVMCIVMStructs;
36 private:
37 Method* _method; // Back link to the owning Method
38 #if defined(COMPILER2) || INCLUDE_JVMCI
39 int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
40 u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
41 #endif
42 #if INCLUDE_JVMTI
43 u2 _number_of_breakpoints; // fullspeed debugging support
44 #endif
45 InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
46 InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
47 // NMethod age is a counter for warm methods detection in the code cache sweeper.
48 // The counter is reset by the sweeper and is decremented by some of the compiled
49 // code. The counter values are interpreted as follows:
50 // 1. (HotMethodDetectionLimit..INT_MAX] - initial value, no counters inserted
51 // 2. [1..HotMethodDetectionLimit) - the method is warm, the counter is used
52 // to figure out which methods can be flushed.
53 // 3. (INT_MIN..0] - method is hot and will deopt and get
54 // recompiled without the counters
55 int _nmethod_age;
56 int _interpreter_invocation_limit; // per-method InterpreterInvocationLimit
57 int _interpreter_backward_branch_limit; // per-method InterpreterBackwardBranchLimit
58 int _interpreter_profile_limit; // per-method InterpreterProfileLimit
59 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
60 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
61 #ifdef TIERED
62 float _rate; // Events (invocation and backedge counter increments) per millisecond
63 jlong _prev_time; // Previous time the rate was acquired
64 u1 _highest_comp_level; // Highest compile level this method has ever seen.
65 u1 _highest_osr_comp_level; // Same for OSR level
66 #endif
67
// Private constructor: zero/initialize all counters and derive the
// per-method compilation thresholds from the global flags. Instances are
// created through the allocate() factory below.
68 MethodCounters(methodHandle mh) : _method(mh()),
69 _nmethod_age(INT_MAX)
70 #ifdef TIERED
71 , _rate(0),
72 _prev_time(0),
73 _highest_comp_level(0),
74 _highest_osr_comp_level(0)
75 #endif
76 {
77 set_interpreter_invocation_count(0);
78 set_interpreter_throwout_count(0);
79 JVMTI_ONLY(clear_number_of_breakpoints());
80 invocation_counter()->init();
81 backedge_counter()->init();
82
// Under StressCodeAging, start at the detection limit so aging paths run.
83 if (StressCodeAging) {
84 set_nmethod_age(HotMethodDetectionLimit);
85 }
86
87 // Set per-method thresholds.
88 double scale = 1.0;
// NOTE(review): the lines declaring 'compile_threshold' (and presumably
// applying a per-method CompileThresholdScaling option to 'scale') are
// elided from this view (source numbering jumps 88 -> 92) -- confirm
// against the full file before modifying the threshold math below.
92 _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
93 if (ProfileInterpreter) {
94 // If interpreter profiling is enabled, the backward branch limit
95 // is compared against the method data counter rather than an invocation
96 // counter, therefore no shifting of bits is required.
97 _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
98 } else {
99 _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
100 }
101 _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
102 _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
103 _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
104 }
105
106 public:
// Factory entry point; allocates in metaspace and may throw (TRAPS).
107 static MethodCounters* allocate(methodHandle mh, TRAPS);
108
// No owned sub-resources to release; the object itself is freed elsewhere.
109 void deallocate_contents(ClassLoaderData* loader_data) {}
110 DEBUG_ONLY(bool on_stack() { return false; }) // for template
111
112 Method* method() const { return _method; }
113
// Object size in words, as required for metaspace allocation.
114 static int size() { return sizeof(MethodCounters) / wordSize; }
115
// Not a Klass; presumably queried by shared metaspace machinery -- confirm.
116 bool is_klass() const { return false; }
117
// Reset all counters; declaration only, body lives outside this header.
118 void clear_counters();
119
120 #if defined(COMPILER2) || INCLUDE_JVMCI
121
122 int interpreter_invocation_count() {
123 return _interpreter_invocation_count;
124 }
125 void set_interpreter_invocation_count(int count) {
126 _interpreter_invocation_count = count;
127 }
128 int increment_interpreter_invocation_count() {
129 return ++_interpreter_invocation_count;
130 }
131
132 void interpreter_throwout_increment() {
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_METHODCOUNTERS_HPP
26 #define SHARE_VM_OOPS_METHODCOUNTERS_HPP
27
28 #include "oops/metadata.hpp"
29 #include "compiler/compilerOracle.hpp"
30 #include "interpreter/invocationCounter.hpp"
31 #include "runtime/arguments.hpp"
32
// MethodCounters: profiling / compilation-policy counters for a Method.
// A MetaspaceObj allocated separately from the Method it belongs to,
// holding invocation/backedge counters, nmethod aging state, and
// per-method compilation thresholds. The Method back link is only kept
// in AOT builds.
33 class MethodCounters: public MetaspaceObj {
34 friend class VMStructs;
35 friend class JVMCIVMStructs;
36 private:
37 #if INCLUDE_AOT
38 Method* _method; // Back link to the owning Method (AOT builds only)
39 #endif
40 #if defined(COMPILER2) || INCLUDE_JVMCI
41 int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
42 u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
43 #endif
44 #if INCLUDE_JVMTI
45 u2 _number_of_breakpoints; // fullspeed debugging support
46 #endif
47 InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
48 InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
49 // NMethod age is a counter for warm methods detection in the code cache sweeper.
50 // The counter is reset by the sweeper and is decremented by some of the compiled
51 // code. The counter values are interpreted as follows:
52 // 1. (HotMethodDetectionLimit..INT_MAX] - initial value, no counters inserted
53 // 2. [1..HotMethodDetectionLimit) - the method is warm, the counter is used
54 // to figure out which methods can be flushed.
55 // 3. (INT_MIN..0] - method is hot and will deopt and get
56 // recompiled without the counters
57 int _nmethod_age;
58 int _interpreter_invocation_limit; // per-method InterpreterInvocationLimit
59 int _interpreter_backward_branch_limit; // per-method InterpreterBackwardBranchLimit
60 int _interpreter_profile_limit; // per-method InterpreterProfileLimit
61 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
62 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
63 #ifdef TIERED
64 float _rate; // Events (invocation and backedge counter increments) per millisecond
65 jlong _prev_time; // Previous time the rate was acquired
66 u1 _highest_comp_level; // Highest compile level this method has ever seen.
67 u1 _highest_osr_comp_level; // Same for OSR level
68 #endif
69
// Private constructor: zero/initialize all counters and derive the
// per-method compilation thresholds from the global flags. The _method
// back link is only initialized in AOT builds. Instances are created
// through the allocate() factory below.
70 MethodCounters(methodHandle mh) :
71 #if INCLUDE_AOT
72 _method(mh()),
73 #endif
74 _nmethod_age(INT_MAX)
75 #ifdef TIERED
76 , _rate(0),
77 _prev_time(0),
78 _highest_comp_level(0),
79 _highest_osr_comp_level(0)
80 #endif
81 {
82 set_interpreter_invocation_count(0);
83 set_interpreter_throwout_count(0);
84 JVMTI_ONLY(clear_number_of_breakpoints());
85 invocation_counter()->init();
86 backedge_counter()->init();
87
// Under StressCodeAging, start at the detection limit so aging paths run.
88 if (StressCodeAging) {
89 set_nmethod_age(HotMethodDetectionLimit);
90 }
91
92 // Set per-method thresholds.
93 double scale = 1.0;
// NOTE(review): the lines declaring 'compile_threshold' (and presumably
// applying a per-method CompileThresholdScaling option to 'scale') are
// elided from this view (source numbering jumps 93 -> 97) -- confirm
// against the full file before modifying the threshold math below.
97 _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
98 if (ProfileInterpreter) {
99 // If interpreter profiling is enabled, the backward branch limit
100 // is compared against the method data counter rather than an invocation
101 // counter, therefore no shifting of bits is required.
102 _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
103 } else {
104 _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
105 }
106 _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
107 _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
108 _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
109 }
110
111 public:
// Factory entry point; allocates in metaspace and may throw (TRAPS).
112 static MethodCounters* allocate(methodHandle mh, TRAPS);
113
// No owned sub-resources to release; the object itself is freed elsewhere.
114 void deallocate_contents(ClassLoaderData* loader_data) {}
115 DEBUG_ONLY(bool on_stack() { return false; }) // for template
116
// Back-link accessor exists only in AOT builds (see _method above).
117 AOT_ONLY(Method* method() const { return _method; })
118
// Object size in words, as required for metaspace allocation.
119 static int size() { return sizeof(MethodCounters) / wordSize; }
120
// Not a Klass; presumably queried by shared metaspace machinery -- confirm.
121 bool is_klass() const { return false; }
122
// Reset all counters; declaration only, body lives outside this header.
123 void clear_counters();
124
125 #if defined(COMPILER2) || INCLUDE_JVMCI
126
127 int interpreter_invocation_count() {
128 return _interpreter_invocation_count;
129 }
130 void set_interpreter_invocation_count(int count) {
131 _interpreter_invocation_count = count;
132 }
133 int increment_interpreter_invocation_count() {
134 return ++_interpreter_invocation_count;
135 }
136
137 void interpreter_throwout_increment() {
|