/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces, used when the object has been GC'd.
class StackTraceData : public CHeapObj<mtInternal> {
 private:
  jvmtiAllocTraceInfo* _trace;
  int _references;

 public:
  StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}

  void increment_reference_count() {
    _references++;
  }

  jvmtiAllocTraceInfo* get_trace() const {
    return _trace;
  }

  static void unreference_and_free(StackTraceData* data) {
    if (!data) {
      return;
    }

    data->_references--;
    if (data->_references == 0) {
      if (data->_trace != NULL) {
        jvmtiStackInfo* stack_info = data->_trace->stack_info;
        FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
        FREE_C_HEAP_OBJ(stack_info);
        FREE_C_HEAP_OBJ(data->_trace);
      }
      delete data;
    }
  }
};

// Internal data structure representing traces with the oop, used while the
// object is live. Since this structure just passes the trace to the GC lists,
// it does not handle any freeing.
class StackTraceDataWithOop : public StackTraceData {
 private:
  oop _obj;

 public:
  StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
    store_oop(o);
  }

  StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  }

  oop load_oop() {
    return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(&_obj);
  }

  oop* get_oop_addr() {
    return &_obj;
  }

  void store_oop(oop value) {
    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  }

  void clear_oop() {
    store_oop(reinterpret_cast<oop>(NULL));
  }
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*, size, mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData* trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData* old_data = _garbage_traces[index];
    StackTraceData::unreference_and_free(old_data);

    trace->increment_reference_count();
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }
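
  // Note on ownership: a StackTraceData stored here is shared, not owned.
  // store_trace() takes a reference via increment_reference_count(), and a
  // replaced entry is released via unreference_and_free(); the same trace may
  // simultaneously be referenced by another GarbageTracesBuffer (see
  // StackTraceStorage::store_garbage_trace below).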

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t* index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces. A fixed-size ring buffer.
  StackTraceData** _garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store the most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;
    _garbage_traces_pos = (_garbage_traces_pos + 1) % _size;
    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with the thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiAllocTraceInfo* trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_all_stack_traces(JvmtiEnv* env,
                            jvmtiAllocTraceInfo** traces,
                            jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_garbage_stack_traces(JvmtiEnv* env,
                                jvmtiAllocTraceInfo** traces,
                                jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_frequent_garbage_stack_traces(JvmtiEnv* env,
                                         jvmtiAllocTraceInfo** traces,
                                         jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_cached_stack_traces(JvmtiEnv* env,
                               jvmtiAllocTraceInfo** traces,
                               jint* trace_counter_ptr);
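
  // Locking note: the public entry points above and below take
  // HeapMonitorStorage_lock; weak_oops_do() is instead called by the GC at a
  // safepoint (see the asserts in reset() and allocate_storage()).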

  // Executes whenever weak references are traversed. is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    MutexLocker mu(HeapMonitorStorage_lock);
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    MutexLocker mu(HeapMonitorStorage_lock);
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() {
    return OrderAccess::load_acquire(&_initialized) != 0;
  }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceDataWithOop>* _allocated_traces;

  // The traces that were live at the last full GC, served up as a cache.
  GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces* _recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces* _frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  int _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData* get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceDataWithOop>* _data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData** data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData* get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData** _data;
    int _size;
  };

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(JvmtiEnv* env,
                         const StackTraceDataCopier &copier,
                         jvmtiAllocTraceInfo** traces,
                         jint* trace_counter_ptr);

  void store_garbage_trace(const StackTraceDataWithOop &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);

  int calculate_frame_count(const StackTraceDataCopier &copier);
  int calculate_info_count(const StackTraceDataCopier &copier);

  bool copy_frame(const StackTraceData* stack_trace_data,
                  jvmtiAllocTraceInfo* current_alloc_trace,
                  jvmtiStackInfo* current_stack_info,
                  jvmtiFrameInfo* current_frame_info);

  // Returns frame copy success. Failure can result when there is no longer
  // enough memory.
  bool copy_frames(const StackTraceDataCopier& copier, int info_count,
                   unsigned char* start, unsigned char* end);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
int HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  MutexLocker mu(HeapMonitorStorage_lock);
  reset();
}

void StackTraceStorage::reset() {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() &&
             Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  OrderAccess::release_store(&_initialized, 0);
}

void StackTraceStorage::free_garbage() {
  StackTraceData** recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData** frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData::unreference_and_free(recent_garbage[i]);
  }

  // Then go through the frequent traces and free those that are now only there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData::unreference_and_free(frequent_garbage[i]);
  }
}

void StackTraceStorage::free_storage() {
  if (!initialized()) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  MutexLocker mu(HeapMonitorStorage_lock);
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() &&
             Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  // In case multiple threads queued up on the lock and then got through
  // one by one.
  if (initialized()) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  memset(&_stats, 0, sizeof(_stats));
  OrderAccess::release_store(&_initialized, 1);
}

void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute check on initialization: between the moment
  // object_alloc_do_sample checked for initialization and now, a stop()
  // may have deleted the data.
  if (initialized()) {
    StackTraceDataWithOop new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->stack_info->frame_count;
    _allocated_traces->append(new_data);
  }
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                     OopClosure* f) {
  size_t count = 0;
  if (initialized()) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();
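
    // The cache served by get_cached_stack_traces is rebuilt from scratch on
    // every traversal: it ends up holding exactly the traces whose objects
    // survived this collection.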

    // Compact the oop traces. Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    for (int i = 0; i < len; i++) {
      StackTraceDataWithOop &trace = _allocated_traces->at(i);
      oop value = trace.load_oop();
      if (is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(trace.get_oop_addr());

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(count++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);
      } else {
        trace.clear_oop();

        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements. Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceDataWithOop zero_trace;
    for (int i = count; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(count);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference ("
                             SIZE_FORMAT ")", count);
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env,
                                             jvmtiAllocTraceInfo** traces,
                                             jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_allocated_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env,
                                                 jvmtiAllocTraceInfo** traces,
                                                 jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_recent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_frequent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env,
                                                jvmtiAllocTraceInfo** traces,
                                                jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_traces_on_last_full_gc) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}
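
// Note for both calculate_* helpers below: the garbage copiers iterate over
// fixed-size ring buffers whose unused slots are NULL (GarbageTracesBuffer's
// constructor zeroes the array), so entries must be NULL-checked and the
// resulting info count can be smaller than copier.size().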

int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  // Walk the traces first to find the total number of frames as well.
  int frame_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
      jvmtiStackInfo* stack_info = trace->stack_info;
      frame_total += stack_info->frame_count;
    }
  }

  return frame_total;
}

int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  int info_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      // TODO: merge this with the method above.
      info_total++;
    }
  }

  return info_total;
}

// Method to test if the data structure would fit between the src address and
// the end address.
template<typename T, typename U>
static bool next_ptr_less_or_equal(T src, U* end) {
  return (src + 1) <= reinterpret_cast<T>(end);
}

bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
                                   jvmtiAllocTraceInfo* current_alloc_trace,
                                   jvmtiStackInfo* current_stack_info,
                                   jvmtiFrameInfo* current_frame_info) {
  jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
  jvmtiStackInfo* stack_info = trace->stack_info;
  int frame_count = stack_info->frame_count;

  memcpy(current_alloc_trace, trace, sizeof(*trace));

  current_alloc_trace->stack_info = current_stack_info;
  memcpy(current_stack_info, stack_info, sizeof(*stack_info));

  current_stack_info->frame_buffer = current_frame_info;
  memcpy(current_frame_info, stack_info->frame_buffer,
         sizeof(jvmtiFrameInfo) * frame_count);
  return true;
}

bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
                                    int info_count,
                                    unsigned char* start,
                                    unsigned char* end) {
  jvmtiAllocTraceInfo* start_alloc_trace =
      reinterpret_cast<jvmtiAllocTraceInfo*>(start);
  jvmtiStackInfo* start_stack_info =
      reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
  jvmtiFrameInfo* start_frame_info =
      reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);

  jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
  jvmtiStackInfo* current_stack_info = start_stack_info;
  jvmtiFrameInfo* current_frame_info = start_frame_info;

  for (int i = 0; i < info_count; i++) {
    assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
           "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
    assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
           "jvmtiStackInfo would write over jvmtiFrameInfos.");

    assert(next_ptr_less_or_equal(current_frame_info, end),
           "jvmtiFrameInfo would write over the end of the buffer.");

    const StackTraceData* stack_trace_data = copier.get(i);
    if (stack_trace_data != NULL) {
      if (!copy_frame(stack_trace_data, current_alloc_trace,
                      current_stack_info, current_frame_info)) {
        return false;
      }

      current_frame_info += current_stack_info->frame_count;
      current_stack_info++;
      current_alloc_trace++;
    }
  }

  return true;
}

void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
                                          const StackTraceDataCopier& copier,
                                          jvmtiAllocTraceInfo** traces,
                                          jint* trace_counter_ptr) {
  *traces = NULL;
  *trace_counter_ptr = 0;

  int frame_total = calculate_frame_count(copier);
  int len = calculate_info_count(copier);

  // Allocate the whole set of stack traces in one block to simplify freeing.
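  // The block is laid out as three consecutive arrays, carved out again in
  // copy_frames():
  //   [jvmtiAllocTraceInfo x len][jvmtiStackInfo x len][jvmtiFrameInfo x frame_total]
  // so a single Deallocate by the agent releases every trace at once.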
  size_t total_size = len * sizeof(jvmtiAllocTraceInfo) +
      len * sizeof(jvmtiStackInfo) + frame_total * sizeof(jvmtiFrameInfo);

  unsigned char* buffer = NULL;
  JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
  env_base->allocate(total_size, &buffer);

  if (buffer == NULL) {
    return;
  }

  bool success = copy_frames(copier, len, buffer, buffer + total_size);

  if (!success) {
    env_base->deallocate(buffer);
    return;
  }

  *trace_counter_ptr = len;
  *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
}

void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
  StackTraceData* new_trace = new StackTraceData(trace.get_trace());

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // 'accepted' is on the right-hand side of the || to force the second
  // store_trace call to happen even if the first one succeeded.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(JvmtiEnv* env,
                                     jvmtiAllocTraceInfo** traces,
                                     jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_all_stack_traces(env,
                                                     traces,
                                                     trace_counter_ptr);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env,
                                                 jvmtiAllocTraceInfo** traces,
                                                 jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(
      env, traces, trace_counter_ptr);
}

void HeapMonitoring::get_garbage_traces(JvmtiEnv* env,
                                        jvmtiAllocTraceInfo** traces,
                                        jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_garbage_stack_traces(env,
                                                         traces,
                                                         trace_counter_ptr);
}

void HeapMonitoring::get_cached_traces(JvmtiEnv* env,
                                       jvmtiAllocTraceInfo** traces,
                                       jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_cached_stack_traces(env,
                                                        traces,
                                                        trace_counter_ptr);
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (enabled()) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  JavaThread* t = static_cast<JavaThread*>(Thread::current());
  _rnd = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  OrderAccess::release_store(&_enabled, 1);
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);
  if (enabled()) {
    StackTraceStorage::storage()->stop();
    OrderAccess::release_store(&_enabled, 0);
  }
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below.
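// Concretely, q/2**26 plays the role of the uniform variate u in (0, 1], so
// fast_log2(q) - 26 = log_2(u) and the computed rate is approximately
// -log_e(u) * _monitoring_rate, i.e. an exponential draw whose mean is
// _monitoring_rate bytes between samples.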
void HeapMonitoring::pick_next_sample(size_t* ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes. In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705).
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o,
                                            size_t byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread* thread = static_cast<JavaThread*>(t);

    jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
    if (stack_info == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }
    trace->stack_info = stack_info;

    jvmtiFrameInfo* frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
    if (frames == NULL) {
      FREE_C_HEAP_OBJ(stack_info);
      FREE_C_HEAP_OBJ(trace);
      return;
    }
    stack_info->frame_buffer = frames;
    stack_info->frame_count = 0;

    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      stack_info->frame_count = count;
    }

    if (stack_info->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
    FREE_C_HEAP_OBJ(stack_info);
    FREE_C_HEAP_OBJ(trace);
  }
}