--- /dev/null	2017-05-01 09:42:45.355096588 -0700
+++ new/src/share/vm/runtime/heapMonitoring.hpp	2017-06-01 20:41:01.786592377 -0700
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP
+#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP
+
+#include "gc/shared/referenceProcessor.hpp"
+#include "runtime/sharedRuntime.hpp"
+
+// Support class for sampling heap allocations across the VM.
+class HeapMonitoring : AllStatic {
+ private:
+  // Cheap random number generator.
+  static uint64_t _rnd;
+  static bool _initialized;
+  static jint _monitoring_rate;
+
+  // Statics for the fast log.
+  static const int kFastlogNumBits = 10;
+  static const int kFastlogMask = (1 << kFastlogNumBits) - 1;
+  static double _log_table[1 << kFastlogNumBits];  // Constant.
+
+  // Returns the next prng value.
+  // pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48.
+  // These are the lrand48 constants.
+  static inline uint64_t next_random(uint64_t rnd) {
+    const uint64_t prng_mult = 0x5DEECE66DLL;
+    const uint64_t prng_add = 0xB;
+    const uint64_t prng_mod_power = 48;
+    const uint64_t prng_mod_mask =
+        ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+    return (prng_mult * rnd + prng_add) & prng_mod_mask;
+  }
+
+  // Adapted from //util/math/fastmath.[h|cc] by Noam Shazeer.
+  // This mimics the VeryFastLog2 code in those files.
+  static inline double fast_log2(const double& d) {
+    assert(d > 0, "bad value passed to fast_log2");
+    uint64_t x = 0;
+    memcpy(&x, &d, sizeof(uint64_t));
+    const uint32_t x_high = x >> 32;
+    const uint32_t y = (x_high >> (20 - kFastlogNumBits)) & kFastlogMask;
+    const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;
+    return exponent + _log_table[y];
+  }
+
+ public:
+  static void pick_next_sample(size_t* ptr);
+
+  static void get_live_traces(jvmtiStackTraces* stack_traces);
+  static void get_garbage_traces(jvmtiStackTraces* stack_traces);
+  static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces);
+  static void release_traces(jvmtiStackTraces* trace_info);
+  static void initialize_profiling(jint monitoring_rate, jint max_storage);
+  static bool initialized();
+  static bool* initialized_address();
+
+  // Called when o is allocated, by the interpreter and C1.
+  static void object_alloc_unsized(oopDesc* o);
+  static void object_alloc(oopDesc* o, intx byte_size);
+
+  // Called when o is allocated from C2 directly; the thread is known
+  // and the sampling decision has already been made.
+  static void object_alloc_do_sample(Thread* t, oopDesc* o, intx size_in_bytes);
+
+  // Called to clean up oops that have been saved by our sampling function,
+  // but which no longer have other references in the heap.
+  static void do_weak_oops(AbstractRefProcTaskExecutor* task_executor,
+                           BoolObjectClosure* is_alive,
+                           OopClosure* f,
+                           VoidClosure* complete_gc);
+  static void do_weak_oops(OopClosure* oop_closure) {
+    do_weak_oops(NULL, NULL, oop_closure, NULL);
+  }
+};
+
+#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP
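
A note on next_random(): the constants (a = 0x5DEECE66D, b = 0xB, mod 2^48) are the same ones used by lrand48 and java.util.Random, so the generator is cheap but well understood. A minimal standalone sketch, outside HotSpot, showing the same recurrence and that results always stay within 48 bits:

#include <cstdint>
#include <cstdio>

// Same 48-bit linear congruential step as HeapMonitoring::next_random().
static uint64_t next_random(uint64_t rnd) {
  const uint64_t prng_mult = 0x5DEECE66DULL;
  const uint64_t prng_add = 0xB;
  const uint64_t prng_mod_power = 48;
  const uint64_t prng_mod_mask =
      ~((~static_cast<uint64_t>(0)) << prng_mod_power);
  return (prng_mult * rnd + prng_add) & prng_mod_mask;
}

int main() {
  uint64_t rnd = 0x123456789ULL;  // arbitrary non-zero seed
  for (int i = 0; i < 4; i++) {
    rnd = next_random(rnd);
    printf("%012llx\n", (unsigned long long) rnd);  // 48 bits = 12 hex digits
  }
  return 0;
}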
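The header only declares _log_table; its initialization lives in heapMonitoring.cpp, which is not part of this hunk. For review purposes, a sketch of how such a table is typically filled (an assumption about the .cpp, not a quote from it): fast_log2() splits the double into an exponent and the top kFastlogNumBits mantissa bits, so entry y must approximate log2 of a mantissa in [1, 2) whose top bits equal y.

#include <cmath>

static const int kFastlogNumBits = 10;
static double _log_table[1 << kFastlogNumBits];

static void init_log_table() {
  for (int i = 0; i < (1 << kFastlogNumBits); i++) {
    // Midpoint of bucket i maps to a mantissa in [1, 2); its log2 is in [0, 1).
    _log_table[i] =
        std::log(1.0 + (i + 0.5) / (1 << kFastlogNumBits)) / std::log(2.0);
  }
}

With such a table, fast_log2(d) returns exponent + log2(1.mantissa) to roughly kFastlogNumBits bits of precision, which is plenty for picking sampling intervals.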
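pick_next_sample() is likewise only declared here. The standard technique for byte-based heap sampling (used by tcmalloc's sampler, which this code appears to derive from) draws each interval from an exponential distribution with mean _monitoring_rate, so every allocated byte is equally likely to trigger a sample. A sketch under that assumption; the names and the 26-bit extraction are illustrative, not taken from the .cpp:

#include <cmath>
#include <cstddef>
#include <cstdint>

// Same 48-bit LCG step as next_random() in the header above.
static uint64_t lcg_step(uint64_t rnd) {
  return (0x5DEECE66DULL * rnd + 0xB) & ((UINT64_C(1) << 48) - 1);
}

// Bytes to allocate before taking the next sample; mean interval == rate.
static size_t next_sample_interval(uint64_t* rnd_state, size_t rate) {
  *rnd_state = lcg_step(*rnd_state);
  // Top 26 bits of the state, shifted into [1, 2^26] so the log is defined.
  double q = (double) (uint32_t) (*rnd_state >> (48 - 26)) + 1.0;
  // Inverse CDF of the exponential: interval = -ln(u) * rate, with
  // u = q / 2^26, hence ln(u) = (log2(q) - 26) * ln(2).
  return (size_t) (-(std::log2(q) - 26) * std::log(2.0) * (double) rate);
}

This is also where fast_log2() pays off: the log2 in the inverse CDF sits on the allocation path, so an exact std::log2 can be replaced by the table-driven approximation.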
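Finally, the trace accessors: get_live_traces(), get_garbage_traces() and get_frequent_garbage_traces() each fill in a caller-provided jvmtiStackTraces, and the matching release_traces() strongly suggests the VM allocates the embedded storage and the caller must hand it back. A hypothetical call sequence (no assumptions made about the struct's fields):

jvmtiStackTraces traces;
HeapMonitoring::get_live_traces(&traces);
// ... walk the returned traces ...
HeapMonitoring::release_traces(&traces);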