 */

#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP
#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP

#include "gc/shared/referenceProcessor.hpp"
#include "runtime/sharedRuntime.hpp"

// Support class for sampling heap allocations across the VM.
class HeapMonitoring : AllStatic {
 private:
  // Cheap random number generator.
  static uint64_t _rnd;
  static jint _monitoring_rate;
  static bool _enabled;

  // Statics for the fast log.
  static const int FastLogNumBits = 10;
  static const int FastLogMask = (1 << FastLogNumBits) - 1;
  static double _log_table[1 << FastLogNumBits];  // Constant.

  static void pick_next_sample(JavaThread* t);

  // Returns the next PRNG value.
  // The PRNG is: aX + b mod c, with a = 0x5DEECE66D, b = 0xB, c = 1 << 48.
  // This is the lrand48 generator.
  static inline uint64_t next_random(uint64_t rnd) {
    const uint64_t PrngMult = 0x5DEECE66DLL;
    const uint64_t prng_add = 0xB;
    const uint64_t prng_mod_power = 48;
    const uint64_t prng_mod_mask =
        ~((~static_cast<uint64_t>(0)) << prng_mod_power);
    return (PrngMult * rnd + prng_add) & prng_mod_mask;
  }
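
  // A worked example of the recurrence (for illustration only): starting
  // from rnd = 1, the next value is
  //   (0x5DEECE66D * 1 + 0xB) & ((1 << 48) - 1) = 0x5DEECE678,
  // and each subsequent call feeds the previous result back in; only the
  // low 48 bits are ever kept, matching the lrand48 family of generators.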

  // Fast approximation of log2(d): the IEEE-754 exponent supplies the
  // integer part and the top mantissa bits index a precomputed table for
  // the fractional part.
  static inline double fast_log2(const double& d) {
    assert(d > 0, "bad value passed to fast_log2");
    uint64_t x = 0;
    memcpy(&x, &d, sizeof(uint64_t));
    const uint32_t x_high = x >> 32;
    const uint32_t y = (x_high >> (20 - FastLogNumBits)) & FastLogMask;
    const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;
    return exponent + _log_table[y];
  }
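
  // A minimal sketch of how _log_table could be filled in (assuming a
  // hypothetical setup routine in the .cpp file; the real initialization
  // may differ):
  //
  //   for (int i = 0; i < (1 << FastLogNumBits); i++) {
  //     // Midpoint of each mantissa bucket, mapped into [1, 2).
  //     double m = 1.0 + (i + 0.5) / (1 << FastLogNumBits);
  //     _log_table[i] = log(m) / log(2.0);
  //   }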

 public:
  /*
   * General note: currently none of these methods are deemed thread-safe.
   */

  // First method called by the user to start the profiler:
  //   - Note: the lower the monitoring rate, the higher the overhead incurred.
  static void initialize_profiling(jint monitoring_rate, jint max_gc_storage);

  // Picks the next sample for a given size_t pointer using a geometric variable
  // with the specified mean. The mean is provided via the
  // initialize_profiling method.
  static void pick_next_sample(size_t *ptr);
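
  // One common way to realize the geometric draw (a sketch, not
  // necessarily this file's implementation): take a uniform q in (0, 1]
  // derived from next_random and set
  //   *ptr = (size_t)(-fast_log2(q) * log(2) * mean)
  // so that the intervals between samples are exponentially distributed
  // with the given mean, making every allocated byte equally likely to be
  // sampled.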

  // Get live/garbage traces and provide a method to release the traces.
  static void get_live_traces(jvmtiStackTraces* stack_traces);
  static void get_garbage_traces(jvmtiStackTraces* stack_traces);
  static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces);
  static void release_traces(jvmtiStackTraces* trace_info);
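
  // Expected caller sequence (an assumed usage sketch): a get_*_traces
  // call fills in the jvmtiStackTraces structure, the caller consumes it,
  // and release_traces must then be called on the same structure to free
  // the memory the getter allocated.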

  static void get_sampling_statistics(jvmtiHeapSamplingStats* stats);
  static void stop_profiling();

  // Called when the object o is to be sampled, given the allocating thread
  // and the object's size in bytes.
  static void object_alloc_do_sample(Thread* t, oopDesc* o, intx size_in_bytes);
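
  // In a sampler of this design, this would typically be reached from the
  // allocation slow path once the thread's remaining sample interval has
  // been consumed; the exact call sites are an assumption here, not
  // visible in this header.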

  // Called to clean up oops that have been saved by our sampling function,
  // but which no longer have other references in the heap.
  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
  static void weak_oops_do(OopClosure* oop_closure) {
    AlwaysTrueClosure always_true;
    weak_oops_do(&always_true, oop_closure);
  }
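
  // Passing AlwaysTrueClosure means every sampled oop is treated as live,
  // so this overload only applies oop_closure to update the stored
  // references (e.g., after objects move) without pruning dead entries.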

  static bool enabled() {
    return _enabled;
  }
};

#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP