48 class FlatProfiler;
49 class IntervalData;
50
51 // Declarations of classes defined only in the implementation.
52 class ProfilerNode;
53 class FlatProfilerTask;
54
// Classifies where a profiling tick landed when it was taken.
enum TickPosition {
  tp_code,    // tick taken while executing (interpreted/compiled) code
  tp_native   // tick taken while executing native code
};
59
// One of these guys is constructed as we enter interesting regions
// and destructed as we exit the region. While we are in the region
// ticks are allotted to the region.
class ThreadProfilerMark: public StackObj {
 public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  // NOTE(review): KERNEL_RETURN presumably expands to an empty body when
  // the flat profiler is excluded from the build -- confirm against the
  // macro definition earlier in this header.
  ThreadProfilerMark(Region) KERNEL_RETURN;
  ~ThreadProfilerMark() KERNEL_RETURN;

 private:
  ThreadProfiler* _pp;  // profiler of the current thread, set on entry
  Region _r;            // which region this mark covers
};
75
76 #ifndef FPROF_KERNEL
77
78 class IntervalData VALUE_OBJ_CLASS_SPEC {
79 // Just to keep these things all together
80 private:
81 int _interpreted;
82 int _compiled;
83 int _native;
84 int _compiling;
85 public:
86 int interpreted() {
87 return _interpreted;
88 }
89 int compiled() {
90 return _compiled;
91 }
92 int native() {
93 return _native;
94 }
95 int compiling() {
96 return _compiling;
102 _interpreted += 1;
103 }
104 void inc_compiled() {
105 _compiled += 1;
106 }
107 void inc_native() {
108 _native += 1;
109 }
110 void inc_compiling() {
111 _compiling += 1;
112 }
113 void reset() {
114 _interpreted = 0;
115 _compiled = 0;
116 _native = 0;
117 _compiling = 0;
118 }
119 static void print_header(outputStream* st);
120 void print_data(outputStream* st);
121 };
122 #endif // FPROF_KERNEL
123
// Per-thread flat-profiler state: records and aggregates the ticks taken
// while this thread was executing.
class ThreadProfiler: public CHeapObj<mtInternal> {
 public:
  // NOTE(review): KERNEL_RETURN presumably reduces these to empty stubs in
  // builds that exclude the flat profiler -- confirm against the macro.
  ThreadProfiler() KERNEL_RETURN;
  ~ThreadProfiler() KERNEL_RETURN;

  // Resets the profiler
  void reset() KERNEL_RETURN;

  // Activates the profiler for a certain thread
  void engage() KERNEL_RETURN;

  // Deactivates the profiler
  void disengage() KERNEL_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) KERNEL_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f) KERNEL_RETURN;

#ifndef FPROF_KERNEL
 private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom;   // preallocated area for pnodes
  char* area_top;      // current allocation point within the area
  char* area_limit;    // end of the preallocated area
  static int table_size;
  ProfilerNode** table;  // hash table of ProfilerNodes, table_size buckets

 private:
  // Tick recording, dispatched on what kind of code the thread was in.
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(Method* method, TickPosition where);
  void compiled_update (Method* method, TickPosition where);
  void stub_update (Method* method, const char* name, TickPosition where);
  void adapter_update (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update (const CodeBlob* cb, TickPosition where);

  // Ticks that could not be attributed to a method, bucketed by category.
  // (ut_end is the bucket count; its enum is declared in a part of this
  // class not visible in this chunk.)
  int unknown_ticks_array[ut_end];
  // Sum of all unattributed ticks across the categories.
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;  // measures how long this profiler has run

  // For interval timing
 private:
  IntervalData _interval_data;
  // Returns a copy of the interval counters.
  IntervalData interval_data() {
    return _interval_data;
  }
  // Returns a pointer for in-place updates to the interval counters.
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // FPROF_KERNEL
};
230
// All-static front end for the flat profiler: global tick counters and
// control of the periodic sampling task.
class FlatProfiler: AllStatic {
 public:
  // NOTE(review): KERNEL_RETURN / KERNEL_RETURN_(v) presumably make these
  // stubs (returning v) in profiler-excluded builds -- confirm against macro.
  static void reset() KERNEL_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
  static void disengage() KERNEL_RETURN ;
  static void print(int unused) KERNEL_RETURN ;
  static bool is_active() KERNEL_RETURN_(false) ;

  // This is NULL if each thread has its own thread profiler,
  // else this is the single thread profiler used by all threads.
  // In particular it makes a difference during garbage collection,
  // where you only want to traverse each thread profiler once.
  static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);

  // Garbage Collection Support
  static void oops_do(OopClosure* f) KERNEL_RETURN ;

  // Support for disassembler to inspect the PCRecorder

  // Returns the start address for a given pc
  // NULL is returned if the PCRecorder is inactive
  static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };  // ms per profiling ticks

  // Returns the number of ticks recorded for the bucket
  // pc belongs to.
  static int bucket_count_for(address pc) KERNEL_RETURN_(0);

#ifndef FPROF_KERNEL

 private:
  // NOTE(review): full_profile_flag is declared in a portion of this class
  // not visible in this chunk.
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // the following group of ticks cover everything that's not attributed to individual Java methods
  static int received_gc_ticks;    // ticks during which gc was active
  static int vm_operation_ticks;   // total ticks in vm_operations other than GC
  static int threads_lock_ticks;   // the number of times we couldn't get the Threads_lock without blocking
  static int blocked_ticks;        // ticks when the thread was blocked.
  static int class_loader_ticks;   // total ticks in class loader
  static int extra_ticks;          // total ticks an extra temporary measuring
  static int compiler_ticks;       // total ticks in compilation
  static int interpreter_ticks;    // ticks in unknown interpreted method
  static int deopt_ticks;          // ticks in deoptimization
  static int unknown_ticks;        // ticks that cannot be categorized
  static int received_ticks;       // ticks that were received by task
  static int delivered_ticks;      // ticks that were delivered by task

  // Profiler instances.  (Declarations between the counters above and here
  // are omitted from this chunk.)
  static ThreadProfiler* thread_profiler;     // single shared profiler, if used
  static ThreadProfiler* vm_thread_profiler;  // profiler for the VM thread

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp);  // extract ticks from ThreadProfiler.
  static void interval_print();        // print interval data.
  static void interval_reset();        // reset interval data.
  enum {interval_print_size = 10};
  static IntervalData* interval_data;
#endif // FPROF_KERNEL
};
329
330 #endif // SHARE_VM_RUNTIME_FPROFILER_HPP
|
48 class FlatProfiler;
49 class IntervalData;
50
51 // Declarations of classes defined only in the implementation.
52 class ProfilerNode;
53 class FlatProfilerTask;
54
// Classifies where a profiling tick landed when it was taken.
enum TickPosition {
  tp_code,    // tick taken while executing (interpreted/compiled) code
  tp_native   // tick taken while executing native code
};
59
// One of these guys is constructed as we enter interesting regions
// and destructed as we exit the region. While we are in the region
// ticks are allotted to the region.
class ThreadProfilerMark: public StackObj {
 public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  // NOTE(review): NOT_FPROF_RETURN presumably expands to an empty body when
  // INCLUDE_FPROF is off -- confirm against the macro definition earlier in
  // this header.
  ThreadProfilerMark(Region) NOT_FPROF_RETURN;
  ~ThreadProfilerMark() NOT_FPROF_RETURN;

 private:
  ThreadProfiler* _pp;  // profiler of the current thread, set on entry
  Region _r;            // which region this mark covers
};
75
76 #if INCLUDE_FPROF
77
78 class IntervalData VALUE_OBJ_CLASS_SPEC {
79 // Just to keep these things all together
80 private:
81 int _interpreted;
82 int _compiled;
83 int _native;
84 int _compiling;
85 public:
86 int interpreted() {
87 return _interpreted;
88 }
89 int compiled() {
90 return _compiled;
91 }
92 int native() {
93 return _native;
94 }
95 int compiling() {
96 return _compiling;
102 _interpreted += 1;
103 }
104 void inc_compiled() {
105 _compiled += 1;
106 }
107 void inc_native() {
108 _native += 1;
109 }
110 void inc_compiling() {
111 _compiling += 1;
112 }
113 void reset() {
114 _interpreted = 0;
115 _compiled = 0;
116 _native = 0;
117 _compiling = 0;
118 }
119 static void print_header(outputStream* st);
120 void print_data(outputStream* st);
121 };
122 #endif // INCLUDE_FPROF
123
// Per-thread flat-profiler state: records and aggregates the ticks taken
// while this thread was executing.
class ThreadProfiler: public CHeapObj<mtInternal> {
 public:
  // NOTE(review): NOT_FPROF_RETURN presumably reduces these to empty stubs
  // when INCLUDE_FPROF is off -- confirm against the macro.
  ThreadProfiler() NOT_FPROF_RETURN;
  ~ThreadProfiler() NOT_FPROF_RETURN;

  // Resets the profiler
  void reset() NOT_FPROF_RETURN;

  // Activates the profiler for a certain thread
  void engage() NOT_FPROF_RETURN;

  // Deactivates the profiler
  void disengage() NOT_FPROF_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) NOT_FPROF_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f) NOT_FPROF_RETURN;

#if INCLUDE_FPROF
 private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom;   // preallocated area for pnodes
  char* area_top;      // current allocation point within the area
  char* area_limit;    // end of the preallocated area
  static int table_size;
  ProfilerNode** table;  // hash table of ProfilerNodes, table_size buckets

 private:
  // Tick recording, dispatched on what kind of code the thread was in.
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(Method* method, TickPosition where);
  void compiled_update (Method* method, TickPosition where);
  void stub_update (Method* method, const char* name, TickPosition where);
  void adapter_update (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update (const CodeBlob* cb, TickPosition where);

  // Ticks that could not be attributed to a method, bucketed by category.
  // (ut_end is the bucket count; its enum is declared in a part of this
  // class not visible in this chunk.)
  int unknown_ticks_array[ut_end];
  // Sum of all unattributed ticks across the categories.
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;  // measures how long this profiler has run

  // For interval timing
 private:
  IntervalData _interval_data;
  // Returns a copy of the interval counters.
  IntervalData interval_data() {
    return _interval_data;
  }
  // Returns a pointer for in-place updates to the interval counters.
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // INCLUDE_FPROF
};
230
// All-static front end for the flat profiler: global tick counters and
// control of the periodic sampling task.
class FlatProfiler: AllStatic {
 public:
  // NOTE(review): NOT_FPROF_RETURN / NOT_FPROF_RETURN_(v) presumably make
  // these stubs (returning v) when INCLUDE_FPROF is off -- confirm.
  static void reset() NOT_FPROF_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
  static void disengage() NOT_FPROF_RETURN ;
  static void print(int unused) NOT_FPROF_RETURN ;
  static bool is_active() NOT_FPROF_RETURN_(false) ;

  // This is NULL if each thread has its own thread profiler,
  // else this is the single thread profiler used by all threads.
  // In particular it makes a difference during garbage collection,
  // where you only want to traverse each thread profiler once.
  static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);

  // Garbage Collection Support
  static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;

  // Support for disassembler to inspect the PCRecorder

  // Returns the start address for a given pc
  // NULL is returned if the PCRecorder is inactive
  static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };  // ms per profiling ticks

  // Returns the number of ticks recorded for the bucket
  // pc belongs to.
  static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);

#if INCLUDE_FPROF

 private:
  // NOTE(review): full_profile_flag is declared in a portion of this class
  // not visible in this chunk.
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // the following group of ticks cover everything that's not attributed to individual Java methods
  static int received_gc_ticks;    // ticks during which gc was active
  static int vm_operation_ticks;   // total ticks in vm_operations other than GC
  static int threads_lock_ticks;   // the number of times we couldn't get the Threads_lock without blocking
  static int blocked_ticks;        // ticks when the thread was blocked.
  static int class_loader_ticks;   // total ticks in class loader
  static int extra_ticks;          // total ticks an extra temporary measuring
  static int compiler_ticks;       // total ticks in compilation
  static int interpreter_ticks;    // ticks in unknown interpreted method
  static int deopt_ticks;          // ticks in deoptimization
  static int unknown_ticks;        // ticks that cannot be categorized
  static int received_ticks;       // ticks that were received by task
  static int delivered_ticks;      // ticks that were delivered by task

  // Profiler instances.  (Declarations between the counters above and here
  // are omitted from this chunk.)
  static ThreadProfiler* thread_profiler;     // single shared profiler, if used
  static ThreadProfiler* vm_thread_profiler;  // profiler for the VM thread

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp);  // extract ticks from ThreadProfiler.
  static void interval_print();        // print interval data.
  static void interval_reset();        // reset interval data.
  enum {interval_print_size = 10};
  static IntervalData* interval_data;
#endif // INCLUDE_FPROF
};
329
330 #endif // SHARE_VM_RUNTIME_FPROFILER_HPP
|