/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP
#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP

#include "gc/shared/referenceProcessor.hpp"
#include "runtime/sharedRuntime.hpp"

// Support class for sampling heap allocations across the VM.
class HeapMonitoring : AllStatic {
 private:
  // Cheap random number generator
  static uint64_t _rnd;
  static jint _monitoring_rate;
  static bool _enabled;

  // Statics for the fast log
  static const int FastLogNumBits = 10;
  static const int FastLogMask = (1 << FastLogNumBits) - 1;
  static double _log_table[1 << FastLogNumBits];  // Constant lookup table used by fast_log2

  static void pick_next_sample(JavaThread *t);

  // Returns the next prng value.
  // pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48.
  // These are the lrand48/drand48 constants (also used by java.util.Random).
  static inline uint64_t next_random(uint64_t rnd) {
    const uint64_t prng_mult = 0x5DEECE66DLL;
    const uint64_t prng_add = 0xB;
    const uint64_t prng_mod_power = 48;
    const uint64_t prng_mod_mask = right_n_bits(prng_mod_power);
    return (prng_mult * rnd + prng_add) & prng_mod_mask;
  }

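  // Approximates log2(d) by taking the IEEE-754 representation apart: the
  // unbiased exponent supplies the integral part, and the top FastLogNumBits
  // bits of the mantissa index into _log_table, which is expected to hold the
  // precomputed log2 of the corresponding fractional values.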
  static inline double fast_log2(const double& d) {
    assert(d > 0, "fast_log2 expects a positive value");
    uint64_t x = 0;
    memcpy(&x, &d, sizeof(uint64_t));
    const uint32_t x_high = x >> 32;
    const uint32_t y = (x_high >> (20 - FastLogNumBits)) & FastLogMask;
    const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;
    return exponent + _log_table[y];
  }

 public:
  /*
   * General note: currently none of these methods are deemed thread-safe.
   */

  // First method called by the user to start the profiler:
  //   - Note: the lower the monitoring rate, the higher the overhead incurred.
  static void initialize_profiling(jint monitoring_rate, jint max_gc_storage);

  // Pick the next sampling interval and store it through *ptr. The interval is
  // drawn from a geometric distribution whose mean is the monitoring rate
  // provided via the initialize_profiling method.
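  // One standard way to draw such an interval is by inversion: generate a
  // uniform value u in (0, 1] with next_random and use -log(u) * mean as the
  // next interval, with fast_log2 above supplying the cheap logarithm (up to
  // a factor of log(2)).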
  static void pick_next_sample(size_t *ptr);

  // Get live/garbage traces and provide a method to release the traces.
  static void get_live_traces(jvmtiStackTraces* stack_traces);
  static void get_garbage_traces(jvmtiStackTraces* stack_traces);
  static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces);
  static void release_traces(jvmtiStackTraces *trace_info);

  // Fill stats with the current heap sampling statistics.
  static void get_sampling_statistics(jvmtiHeapSamplingStats* stats);
  // Stop the sampling profiler.
  static void stop_profiling();

  // Called when object o, allocated by thread t with the given size in bytes,
  // has been chosen for sampling.
  static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes);

  // Called to clean up oops that have been saved by our sampling function,
  // but which no longer have other references in the heap.
  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);
  static void weak_oops_do(OopClosure* oop_closure) {
    AlwaysTrueClosure always_true;
    weak_oops_do(&always_true, oop_closure);
  }

  static bool enabled() {
    return _enabled;
  }
};

#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP