/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
#define SHARE_VM_RUNTIME_FPROFILER_HPP

#include "runtime/timer.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

// a simple flat profiler for Java


// Forward declaration of classes defined in this header file
class ThreadProfiler;
class ThreadProfilerMark;
class FlatProfiler;
class IntervalData;

// Declarations of classes defined only in the implementation.
class ProfilerNode;
class FlatProfilerTask;

enum TickPosition {
  tp_code,
  tp_native
};

// One of these guys is constructed as we enter interesting regions
// and destructed as we exit the region.  While we are in the region
// ticks are allotted to the region.
class ThreadProfilerMark: public StackObj {
public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  ThreadProfilerMark(Region)  KERNEL_RETURN;
  ~ThreadProfilerMark()       KERNEL_RETURN;

private:
  ThreadProfiler* _pp;
  Region _r;
};
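
// Usage sketch (illustrative only, not part of the original interface): a
// ThreadProfilerMark is a StackObj, so it is placed on the stack at the
// start of an interesting region and its destructor closes the region.
// The enclosing function below is hypothetical.
//
//   void load_class_example() {
//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
//     // ... do class loading; profiler ticks that arrive while tpm is
//     //     live are charged to the class loader region ...
//   }   // tpm is destructed here and the region ends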

#ifndef FPROF_KERNEL

class IntervalData VALUE_OBJ_CLASS_SPEC {
  // Just to keep these things all together
private:
  int _interpreted;
  int _compiled;
  int _native;
  int _compiling;
public:
  int interpreted() {
    return _interpreted;
  }
  int compiled() {
    return _compiled;
  }
  int native() {
    return _native;
  }
  int compiling() {
    return _compiling;
  }
  int total() {
    return (interpreted() + compiled() + native() + compiling());
  }
  void inc_interpreted() {
    _interpreted += 1;
  }
  void inc_compiled() {
    _compiled += 1;
  }
  void inc_native() {
    _native += 1;
  }
  void inc_compiling() {
    _compiling += 1;
  }
  void reset() {
    _interpreted = 0;
    _compiled = 0;
    _native = 0;
    _compiling = 0;
  }
  static void print_header(outputStream* st);
  void print_data(outputStream* st);
};
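
// Interval accounting sketch (illustrative only, not part of the original
// header): each tick is classified and the matching counter is bumped;
// total() sums the four categories and reset() clears them for the next
// interval. The local variable below is hypothetical.
//
//   IntervalData id;
//   id.reset();
//   id.inc_interpreted();                  // tick in interpreted code
//   id.inc_compiled();                     // tick in compiled code
//   int ticks_this_interval = id.total();  // == 2
//   id.reset();                            // start the next interval at zero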
#endif // FPROF_KERNEL

class ThreadProfiler: public CHeapObj {
public:
  ThreadProfiler()    KERNEL_RETURN;
  ~ThreadProfiler()   KERNEL_RETURN;

  // Resets the profiler
  void reset()        KERNEL_RETURN;

  // Activates the profiler for a certain thread
  void engage()       KERNEL_RETURN;

  // Deactivates the profiler
  void disengage()    KERNEL_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) KERNEL_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f)         KERNEL_RETURN;

#ifndef FPROF_KERNEL
private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom; // preallocated area for pnodes
  char* area_top;
  char* area_limit;
  static int            table_size;
  ProfilerNode** table;

private:
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick   (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(methodOop method, TickPosition where);
  void compiled_update   (methodOop method, TickPosition where);
  void stub_update       (methodOop method, const char* name, TickPosition where);
  void adapter_update    (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update    (const CodeBlob* cb, TickPosition where);

  void vm_update    (TickPosition where);
  void vm_update    (const char* name, TickPosition where);

  void record_tick_for_running_frame(JavaThread* thread, frame fr);
  void record_tick_for_calling_frame(JavaThread* thread, frame fr);

  void initialize();

  static int  entry(int value);


private:
  friend class FlatProfiler;
  void record_tick(JavaThread* thread);
  bool engaged;
  // so we can do percentages for this thread, and quick checks for activity
  int thread_ticks;
  int compiler_ticks;
  int interpreter_ticks;

public:
  void inc_thread_ticks() { thread_ticks += 1; }

private:
  friend class ThreadProfilerMark;
  // counters for thread-specific regions
  bool region_flag[ThreadProfilerMark::maxRegion];
  int class_loader_ticks;
  int extra_ticks;

private:
  // other thread-specific regions
  int blocked_ticks;
  enum UnknownTickSites {
      ut_null_method,
      ut_vtable_stubs,
      ut_running_frame,
      ut_calling_frame,
      ut_no_pc,
      ut_no_last_Java_frame,
      ut_unknown_thread_state,
      ut_end
  };
  int unknown_ticks_array[ut_end];
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;

  // For interval timing
private:
  IntervalData _interval_data;
  IntervalData interval_data() {
    return _interval_data;
  }
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // FPROF_KERNEL
};
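
// Per-thread lifecycle sketch (illustrative only, not part of the original
// header): a ThreadProfiler is reset and engaged before sampling, ticks are
// recorded by the periodic profiler task while it is engaged, and the
// results are printed after it is disengaged. The thread name below is
// made up.
//
//   ThreadProfiler* tp = new ThreadProfiler();
//   tp->reset();
//   tp->engage();
//   // ... the periodic profiler task attributes ticks to this profiler ...
//   tp->disengage();
//   tp->print("worker thread");
//   delete tp;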

class FlatProfiler: AllStatic {
public:
  static void reset() KERNEL_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
  static void disengage() KERNEL_RETURN ;
  static void print(int unused) KERNEL_RETURN ;
  static bool is_active() KERNEL_RETURN_(false) ;

  // Returns NULL if each thread has its own thread profiler,
  // otherwise returns the single thread profiler shared by all threads.
  // The distinction matters during garbage collection,
  // where each thread profiler must be traversed exactly once.
  static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);

  // Garbage Collection Support
  static void oops_do(OopClosure* f) KERNEL_RETURN ;

  // Support for disassembler to inspect the PCRecorder

  // Returns the start address for a given pc
  // NULL is returned if the PCRecorder is inactive
  static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };   // ms per profiling tick

  // Returns the number of ticks recorded for the bucket
  // pc belongs to.
  static int bucket_count_for(address pc) KERNEL_RETURN_(0);

#ifndef FPROF_KERNEL

 private:
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // the following group of ticks covers everything that's not attributed to individual Java methods
  static int  received_gc_ticks;      // ticks during which gc was active
  static int vm_operation_ticks;      // total ticks in vm_operations other than GC
  static int threads_lock_ticks;      // the number of times we couldn't get the Threads_lock without blocking
  static int      blocked_ticks;      // ticks when the thread was blocked.
  static int class_loader_ticks;      // total ticks in class loader
  static int        extra_ticks;      // total ticks in the extra region (temporary measurements)
  static int     compiler_ticks;      // total ticks in compilation
  static int  interpreter_ticks;      // ticks in unknown interpreted method
  static int        deopt_ticks;      // ticks in deoptimization
  static int      unknown_ticks;      // ticks that cannot be categorized
  static int     received_ticks;      // ticks that were received by task
  static int    delivered_ticks;      // ticks that were delivered by task
  static int non_method_ticks() {
    return
      ( received_gc_ticks
      + vm_operation_ticks
      + deopt_ticks
      + threads_lock_ticks
      + blocked_ticks
      + compiler_ticks
      + interpreter_ticks
      + unknown_ticks );
  }
  static elapsedTimer timer;

  // Counts of each of the byte codes
  static int*           bytecode_ticks;
  static int*           bytecode_ticks_stub;
  static void print_byte_code_statistics();

  // the ticks below are for continuous profiling (to adjust recompilation, etc.)
  static int          all_ticks;      // total count of ticks received so far
  static int      all_int_ticks;      // ticks in interpreter
  static int     all_comp_ticks;      // ticks in compiled code (+ native)
  static bool full_profile_flag;      // collecting full profile?

  // to accumulate thread-specific data
  // if we aren't profiling individual threads.
  static ThreadProfiler* thread_profiler;
  static ThreadProfiler* vm_thread_profiler;

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
  static void interval_print();       // print interval data.
  static void interval_reset();       // reset interval data.
  enum {interval_print_size = 10};
  static IntervalData* interval_data;
#endif // FPROF_KERNEL
};
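
// VM-level sketch (illustrative only, not part of the original header):
// FlatProfiler is AllStatic, so it is driven through static calls, engaged
// around the code to be profiled and printed on exit. The main_thread
// variable below is assumed to be the starting JavaThread.
//
//   FlatProfiler::reset();
//   FlatProfiler::engage(main_thread, /* fullProfile */ true);
//   // ... run the workload; ticks arrive every MillisecsPerTick (10) ms ...
//   FlatProfiler::disengage();
//   FlatProfiler::print(0);   // the argument is unused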

#endif // SHARE_VM_RUNTIME_FPROFILER_HPP