/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP
#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP

#include "gc/shared/referenceProcessor.hpp"
#include "runtime/sharedRuntime.hpp"

// Support class for sampling heap allocations across the VM.
class HeapMonitoring : AllStatic {
 private:
  // Cheap random number generator
  static uint64_t _rnd;
  static bool _initialized;
  static jint _monitoring_rate;
  static bool _enabled;

  // Statics for the fast log
  static const int _fast_log_num_bits = 10;
  static const int _fast_log_mask = (1 << _fast_log_num_bits) - 1;
  static double _log_table[1 << _fast_log_num_bits];  // Constant

  static void pick_next_sample(JavaThread *t);

  // Returns the next prng value.
  // pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
  // This is the lrand48 generator.
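  // A plausible usage pattern (the sampling logic itself is not in this
  // header): advance the state with _rnd = next_random(_rnd) and derive the
  // next sampling interval from the high-order bits of the result.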
  static inline uint64_t next_random(uint64_t rnd) {
    const uint64_t prng_mult = 0x5DEECE66DLL;
    const uint64_t prng_add = 0xB;
    const uint64_t prng_mod_power = 48;
    const uint64_t prng_mod_mask =
        ~((~static_cast<uint64_t>(0)) << prng_mod_power);
    return (prng_mult * rnd + prng_add) & prng_mod_mask;
  }

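  // Cheap approximation of log2(d): the double's bit pattern is reinterpreted
  // as an integer, the biased exponent is read from bits 52..62 (hence the
  // ">> 20" on the high 32 bits and the -1023 bias correction), and the top
  // _fast_log_num_bits bits of the mantissa index _log_table. For the sum to
  // approximate log2(d), the table is presumably filled elsewhere with values
  // close to log2(1.0 + (i + 0.5) / (1 << _fast_log_num_bits)).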
  static inline double fast_log2(const double& d) {
    assert(d > 0, "fast_log2 requires a positive value");
    uint64_t x = 0;
    memcpy(&x, &d, sizeof(uint64_t));
    const uint32_t x_high = x >> 32;
    const uint32_t y = (x_high >> (20 - _fast_log_num_bits)) & _fast_log_mask;
    const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;
    return exponent + _log_table[y];
  }

 public:
  static void pick_next_sample(size_t *ptr);

  static void get_live_traces(jvmtiStackTraces* stack_traces);
  static void get_garbage_traces(jvmtiStackTraces* stack_traces);
  static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces);
  static void release_traces(jvmtiStackTraces *trace_info);
  static void initialize_profiling(jint monitoring_rate, jint max_storage);
  static void stop_profiling();
  static bool initialized();
  static bool *initialized_address();
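  // Illustrative call sequence for a consumer of this interface (a sketch
  // only; the actual JVMTI glue lives outside this header):
  //   HeapMonitoring::initialize_profiling(monitoring_rate, max_storage);
  //   jvmtiStackTraces traces;
  //   HeapMonitoring::get_live_traces(&traces);
  //   ... inspect traces ...
  //   HeapMonitoring::release_traces(&traces);
  //   HeapMonitoring::stop_profiling();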

  // Called by the interpreter and C1 when o is allocated.
  static void object_alloc_unsized(oopDesc* o);
  static void object_alloc(oopDesc* o, intx byte_size);

  // Called when o is allocated directly from C2; the thread is known and the
  // sampling decision has already been made.
  static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes);
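  // The intended split appears to be that object_alloc/object_alloc_unsized
  // decide internally whether an allocation is sampled, whereas C2-generated
  // code makes that decision itself and calls object_alloc_do_sample only for
  // allocations already chosen as samples.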

  // Called to clean up oops that have been saved by our sampling function,
  // but which no longer have other references in the heap.
  static void weak_oops_do(AbstractRefProcTaskExecutor *task_executor,
                           BoolObjectClosure* is_alive,
                           OopClosure *f,
                           VoidClosure *complete_gc);
  static void weak_oops_do(OopClosure* oop_closure) {
    weak_oops_do(NULL, NULL, oop_closure, NULL);
  }

  static bool enabled() {
    return _enabled;
  }
};

#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP