12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/collectedHeap.hpp"
28 #include "memory/universe.hpp"
29 #include "runtime/heapMonitoring.hpp"
30 #include "runtime/vframe.hpp"
31
32 const int MaxStackDepth = 1024;
33
34 // Internal data structure representing traces.
35 struct StackTraceData : CHeapObj<mtInternal> {
36 jvmtiStackTrace *trace;
37 oop obj;
38 int references;
39
40 StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}
41
42 StackTraceData() : trace(NULL), obj(NULL), references(0) {}
43
44 // StackTraceData instances are shared between various lists, so the
45 // heap data is freed by hand rather than in a destructor. There are
46 // cases where the struct lives on the stack but holds heap data that
47 // must not be freed.
48 static void free_data(StackTraceData *data) {
49 if (data->trace != NULL) {
50 FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
51 FREE_C_HEAP_OBJ(data->trace);
52 }
187 // Called by the client to retrieve the list of sampled allocation
188 // stack traces. The passed jvmtiStackTraces gets mutated.
189 void get_all_stack_traces(jvmtiStackTraces *traces);
190
191 // Called by the client to retrieve the list of recently collected
192 // garbage stack traces. The passed jvmtiStackTraces gets mutated.
193 void get_garbage_stack_traces(jvmtiStackTraces *traces);
194
195 // Called by the client to retrieve the list of frequently collected
196 // garbage stack traces. The passed jvmtiStackTraces gets mutated.
197 void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);
198
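// A rough sketch of the intended client-side calling pattern, pieced
// together from these declarations and from copy_stack_traces() below;
// the field names match this file, but the entry point a real JVMTI
// agent would use and the release step are assumptions:
//
//   jvmtiStackTraces traces;
//   StackTraceStorage::storage()->get_all_stack_traces(&traces);
//   for (int i = 0; i < traces.trace_count; i++) {
//     jvmtiStackTrace *t = &traces.stack_traces[i];
//     // t->thread_id, t->size and t->frames[0 .. t->frame_count) are
//     // deep copies owned by the caller.
//   }
//   // The caller must eventually release the deep-copied traces.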
199 // Executed whenever weak references are traversed. is_alive tells
200 // whether a given oop is still reachable and live.
201 size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);
202
203 ~StackTraceStorage();
204 StackTraceStorage();
205
206 static StackTraceStorage* storage() {
207 if (internal_storage == NULL) {
208 internal_storage = new StackTraceStorage();
209 }
210 return internal_storage;
211 }
212
213 static void reset_stack_trace_storage() {
214 delete internal_storage;
215 internal_storage = NULL;
216 }
217
218 bool is_initialized() {
219 return _initialized;
220 }
221
222 const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
223 return _stats;
224 }
225
226 // Static method to set the storage in place at initialization.
227 static void initialize_stack_trace_storage(int max_storage) {
228 reset_stack_trace_storage();
229 StackTraceStorage *storage = StackTraceStorage::storage();
230 storage->initialize_storage(max_storage);
231 }
232
233 void accumulate_sample_rate(size_t rate) {
234 _stats.sample_rate_accumulation += rate;
235 _stats.sample_rate_count++;
236 }
237
238 bool initialized() { return _initialized; }
239 volatile bool *initialized_address() { return &_initialized; }
240
241 private:
242 // The traces currently sampled.
243 GrowableArray<StackTraceData> *_allocated_traces;
244
245 // Recent garbage traces.
246 MostRecentGarbageTraces *_recent_garbage_traces;
247
248 // Frequent garbage traces.
249 FrequentGarbageTraces *_frequent_garbage_traces;
250
251 // Heap Sampling statistics.
252 jvmtiHeapSamplingStats _stats;
253
254 // Maximum amount of storage provided by the JVMTI call initialize_profiling.
255 int _max_gc_storage;
256
257 static StackTraceStorage* internal_storage;
258 volatile bool _initialized;
259
260 // Support functions and classes for copying data to the external
261 // world.
271 _data(data) {}
272 int size() const { return _data ? _data->length() : 0; }
273 const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }
274
275 private:
276 GrowableArray<StackTraceData> *_data;
277 };
278
279 class GarbageStackTraceDataCopier : public StackTraceDataCopier {
280 public:
281 GarbageStackTraceDataCopier(StackTraceData **data, int size) :
282 _data(data), _size(size) {}
283 int size() const { return _size; }
284 const StackTraceData *get(uint32_t i) const { return _data[i]; }
285
286 private:
287 StackTraceData **_data;
288 int _size;
289 };
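// Design note: both copiers expose the same size()/get(i) interface, so
// copy_stack_traces() below can walk the live traces (backed by a
// GrowableArray) and the garbage traces (a raw pointer array) through a
// single code path.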
290
291 // Instance initialization.
292 void initialize_storage(int max_storage);
293
294 // Copies from StackTraceData to jvmtiStackTrace.
295 bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);
296
297 // Creates a deep copy of the list of StackTraceData.
298 void copy_stack_traces(const StackTraceDataCopier &copier,
299 jvmtiStackTraces *traces);
300
301 void store_garbage_trace(const StackTraceData &trace);
302
303 void free_garbage();
304 };
305
306 StackTraceStorage* StackTraceStorage::internal_storage;
307
308 // Statics for Sampler
309 double HeapMonitoring::_log_table[1 << FastLogNumBits];
310 bool HeapMonitoring::_enabled;
311 AlwaysTrueClosure HeapMonitoring::_always_true;
312 jint HeapMonitoring::_monitoring_rate;
313
314 // Cheap random number generator
315 uint64_t HeapMonitoring::_rnd;
316
317 StackTraceStorage::StackTraceStorage() :
318 _allocated_traces(NULL),
319 _recent_garbage_traces(NULL),
320 _frequent_garbage_traces(NULL),
321 _max_gc_storage(0),
322 _initialized(false) {
323 memset(&_stats, 0, sizeof(_stats));
324 }
325
326 void StackTraceStorage::free_garbage() {
327 StackTraceData **recent_garbage = NULL;
328 uint32_t recent_size = 0;
329
330 StackTraceData **frequent_garbage = NULL;
331 uint32_t frequent_size = 0;
332
333 if (_recent_garbage_traces != NULL) {
334 recent_garbage = _recent_garbage_traces->get_traces();
335 recent_size = _recent_garbage_traces->size();
336 }
337
338 if (_frequent_garbage_traces != NULL) {
339 frequent_garbage = _frequent_garbage_traces->get_traces();
340 frequent_size = _frequent_garbage_traces->size();
341 }
342
343 // Simple two-pass solution; acceptable since this happens at exit.
349
350 if (trace->references == 0) {
351 StackTraceData::free_data(trace);
352 }
353 }
354 }
355
356 // Then go through the frequent traces and free those whose only remaining reference was here.
357 for (uint32_t i = 0; i < frequent_size; i++) {
358 StackTraceData *trace = frequent_garbage[i];
359 if (trace != NULL) {
360 trace->references--;
361
362 if (trace->references == 0) {
363 StackTraceData::free_data(trace);
364 }
365 }
366 }
367 }
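// Note on the reference counts used above: a trace can sit in both the
// recent and the frequent garbage lists, so 'references' counts list
// memberships and the data is freed only when the last list drops it.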
368
369 StackTraceStorage::~StackTraceStorage() {
370 delete _allocated_traces;
371
372 free_garbage();
373 delete _recent_garbage_traces;
374 delete _frequent_garbage_traces;
375 _initialized = false;
376 }
377
378 void StackTraceStorage::initialize_storage(int max_gc_storage) {
379 // Guard against multiple threads blocking on the lock and then getting through one by one.
380 if (_initialized) {
381 return;
382 }
383
384 _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
385 GrowableArray<StackTraceData>(128, true);
386
387 _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
388 _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
389
390 _max_gc_storage = max_gc_storage;
391 _initialized = true;
392 }
393
394 void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
395 StackTraceData new_data(trace, o);
396 _stats.sample_count++;
397 _stats.stack_depth_accumulation += trace->frame_count;
398 _allocated_traces->append(new_data);
399 }
400
401 size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
402 OopClosure *f) {
403 size_t count = 0;
404 if (is_initialized()) {
405 int len = _allocated_traces->length();
406
407 // Compact the oop traces. Moves the live oops to the beginning of the
408 // growable array, potentially overwriting the dead ones.
409 int curr_pos = 0;
410 for (int i = 0; i < len; i++) {
411 StackTraceData &trace = _allocated_traces->at(i);
412 oop value = trace.obj;
413 if (Universe::heap()->is_in_reserved(value)
414 && is_alive->do_object_b(value)) {
415 // Update the oop to point to the new object if it is still alive.
416 f->do_oop(&(trace.obj));
417
418 // Copy the old trace, if it is still live.
419 _allocated_traces->at_put(curr_pos++, trace);
420
421 count++;
422 } else {
423 // If the old trace is no longer live, add it to the list of
424 // recently collected garbage.
467 }
468
469 // See comment on get_all_stack_traces
470 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
471 GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
472 _recent_garbage_traces->size());
473 copy_stack_traces(copier, traces);
474 }
475
476 // See comment on get_all_stack_traces
477 void StackTraceStorage::get_frequent_garbage_stack_traces(
478 jvmtiStackTraces *traces) {
479 GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
480 _frequent_garbage_traces->size());
481 copy_stack_traces(copier, traces);
482 }
483
484
485 void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
486 jvmtiStackTraces *traces) {
487 int len = copier.size();
488
489 // Create a new array to store the copied jvmtiStackTrace objects,
490 // + 1 for a NULL at the end.
491 jvmtiStackTrace *t =
492 NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
493 if (t == NULL) {
494 traces->stack_traces = NULL;
495 traces->trace_count = 0;
496 return;
497 }
498 // +1 to have a NULL at the end of the array.
499 memset(t, 0, (len + 1) * sizeof(*t));
500
501 // Copy the StackTraceData objects into the new array.
502 int trace_count = 0;
503 for (int i = 0; i < len; i++) {
504 const StackTraceData *stack_trace = copier.get(i);
505 if (stack_trace != NULL) {
506 jvmtiStackTrace *to = &t[trace_count];
575 traces->stack_traces = NULL;
576 }
577
578 // Invoked by the GC to clean up old stack traces and remove old arrays
579 // of instrumentation that are still lying around.
580 size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
581 OopClosure *f) {
582 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
583 return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
584 }
585
586 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
587 jint max_gc_storage) {
588 // Ignore if already enabled.
589 if (_enabled) {
590 return;
591 }
592
593 _monitoring_rate = monitoring_rate;
594
595 // Initialize and reset.
596 StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);
597
598 // Populate the lookup table for fast_log2.
599 // This approximates the log2 curve with a step function.
600 // Steps have height equal to log2 of the mid-point of the step.
601 for (int i = 0; i < (1 << FastLogNumBits); i++) {
602 double half_way = static_cast<double>(i + 0.5);
603 _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
604 }
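// Illustrative table values, assuming FastLogNumBits == 10 (the actual
// constant is defined elsewhere). Entry i holds log2 of the midpoint of
// its step:
//   _log_table[0]    = log2(1 +    0.5/1024) ~= 0.000704
//   _log_table[512]  = log2(1 +  512.5/1024) ~= 0.585430
//   _log_table[1023] = log2(1 + 1023.5/1024) ~= 0.999648
// With the table in hand, fast_log2() can reduce to extracting the
// exponent bits and one table lookup instead of calling log().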
605
606 JavaThread *t = static_cast<JavaThread *>(Thread::current());
607 _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
608 if (_rnd == 0) {
609 _rnd = 1;
610 }
611 _enabled = true;
612 }
613
614 void HeapMonitoring::stop_profiling() {
615 _enabled = false;
616 }
617
618 // Generates a geometric variable with the specified mean (512K by default).
619 // This is done by generating a random number between 0 and 1 and applying
620 // the inverse cumulative distribution function for an exponential.
621 // Specifically: Let m be the inverse of the sample rate, then
622 // the probability distribution function is m*exp(-mx) so the CDF is
623 // p = 1 - exp(-mx), so
624 // q = 1 - p = exp(-mx)
625 // log_e(q) = -mx
626 // -log_e(q)/m = x
627 // log_2(q) * (-log_e(2) * 1/m) = x
628 // In the code, q is actually in the range 1 to 2**26, hence the -26 below
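// Worked example (illustrative only, assuming the default 512K mean,
// i.e. _monitoring_rate == 524288): if the PRNG yields q == 2^25, then
// q / 2^26 == 0.5 and
//   log_val = log2(q) - 26 = -1
//   rate    = -1 * (-log_e(2) * 524288) + 1 ~= 363409 bytes
// which is the median of the distribution; averaged over many draws the
// sampling interval comes out near the 524288-byte mean.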
629 void HeapMonitoring::pick_next_sample(size_t *ptr) {
630 _rnd = next_random(_rnd);
637 // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
638 // under piii debug for some binaries.
639 double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
640 // Put the computed p-value through the inverse CDF of a geometric.
641 // To save ~1/20th of exec time, one could replace
642 // min(0.0, FastLog2(q) - 26) by (FastLog2(q) - 26.000705).
643 // The value 26.000705 is used rather than 26 to compensate
644 // for inaccuracies in FastLog2 which otherwise result in a
645 // negative answer.
646 double log_val = (fast_log2(q) - 26);
647 size_t rate = static_cast<size_t>(
648 (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
649 *ptr = rate;
650
651 StackTraceStorage::storage()->accumulate_sample_rate(rate);
652 }
653
654 void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
655 #if defined(X86) || defined(PPC)
656
657 if (StackTraceStorage::storage()->is_initialized()) {
658 assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
659 JavaThread *thread = static_cast<JavaThread *>(t);
660
661 jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
662 if (trace == NULL) {
663 return;
664 }
665
666 jvmtiFrameInfo *frames =
667 NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
668
669 if (frames == NULL) {
670 FREE_C_HEAP_OBJ(trace);
671 return;
672 }
673
674 trace->frames = frames;
675 trace->thread_id = SharedRuntime::get_java_tid(thread);
676 trace->size = byte_size;
677 trace->frame_count = 0;
682 while (!vfst.at_end() && count < MaxStackDepth) {
683 Method* m = vfst.method();
684 frames[count].location = vfst.bci();
685 frames[count].method = m->jmethod_id();
686 count++;
687
688 vfst.next();
689 }
690 trace->frame_count = count;
691 }
692
693 if (trace->frame_count > 0) {
694 // Success!
695 StackTraceStorage::storage()->add_trace(trace, o);
696 return;
697 }
698
699 // Failure!
700 FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
701 FREE_C_HEAP_OBJ(trace);
702 return;
703 } else {
704 // There is something like 64K worth of allocation before the VM
705 // initializes. Skipping those samples is just in the interest of
706 // not slowing down startup.
707 assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
708 }
709 #else
710 Unimplemented();
711 #endif
712 }
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/collectedHeap.hpp"
28 #include "memory/universe.hpp"
29 #include "runtime/heapMonitoring.hpp"
30 #include "runtime/vframe.hpp"
31
32 static const int MaxStackDepth = 1024;
33
34 // Internal data structure representing traces.
35 struct StackTraceData : CHeapObj<mtInternal> {
36 jvmtiStackTrace *trace;
37 oop obj;
38 int references;
39
40 StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}
41
42 StackTraceData() : trace(NULL), obj(NULL), references(0) {}
43
44 // StackTraceData instances are shared between various lists, so the
45 // heap data is freed by hand rather than in a destructor. There are
46 // cases where the struct lives on the stack but holds heap data that
47 // must not be freed.
48 static void free_data(StackTraceData *data) {
49 if (data->trace != NULL) {
50 FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
51 FREE_C_HEAP_OBJ(data->trace);
52 }
187 // Called by the client to retrieve the list of sampled allocation
188 // stack traces. The passed jvmtiStackTraces gets mutated.
189 void get_all_stack_traces(jvmtiStackTraces *traces);
190
191 // Called by the client to retrieve the list of recently collected
192 // garbage stack traces. The passed jvmtiStackTraces gets mutated.
193 void get_garbage_stack_traces(jvmtiStackTraces *traces);
194
195 // Called by the client to retrieve the list of frequently collected
196 // garbage stack traces. The passed jvmtiStackTraces gets mutated.
197 void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);
198
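// A rough sketch of the intended client-side calling pattern, pieced
// together from these declarations and from copy_stack_traces() below;
// the field names match this file, but the entry point a real JVMTI
// agent would use and the release step are assumptions:
//
//   jvmtiStackTraces traces;
//   StackTraceStorage::storage()->get_all_stack_traces(&traces);
//   for (int i = 0; i < traces.trace_count; i++) {
//     jvmtiStackTrace *t = &traces.stack_traces[i];
//     // t->thread_id, t->size and t->frames[0 .. t->frame_count) are
//     // deep copies owned by the caller.
//   }
//   // The caller must eventually release the deep-copied traces.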
199 // Executed whenever weak references are traversed. is_alive tells
200 // whether a given oop is still reachable and live.
201 size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);
202
203 ~StackTraceStorage();
204 StackTraceStorage();
205
206 static StackTraceStorage* storage() {
207 static StackTraceStorage internal_storage;
208 return &internal_storage;
209 }
210
211 void initialize(int max_storage) {
212 MutexLocker mu(HeapMonitor_lock);
213 free_storage();
214 allocate_storage(max_storage);
215 memset(&_stats, 0, sizeof(_stats));
216 }
217
218 const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
219 return _stats;
220 }
221
222 void accumulate_sample_rate(size_t rate) {
223 _stats.sample_rate_accumulation += rate;
224 _stats.sample_rate_count++;
225 }
226
227 bool initialized() { return _initialized; }
228 volatile bool *initialized_address() { return &_initialized; }
229
230 private:
231 // Protects the traces currently sampled (below).
232 volatile intptr_t _stack_storage_lock[1];
233
234 // The traces currently sampled.
235 GrowableArray<StackTraceData> *_allocated_traces;
236
237 // Recent garbage traces.
238 MostRecentGarbageTraces *_recent_garbage_traces;
239
240 // Frequent garbage traces.
241 FrequentGarbageTraces *_frequent_garbage_traces;
242
243 // Heap Sampling statistics.
244 jvmtiHeapSamplingStats _stats;
245
246 // Maximum amount of storage provided by the JVMTI call initialize_profiling.
247 int _max_gc_storage;
248
249
250 volatile bool _initialized;
251
252 // Support functions and classes for copying data to the external
253 // world.
263 _data(data) {}
264 int size() const { return _data ? _data->length() : 0; }
265 const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }
266
267 private:
268 GrowableArray<StackTraceData> *_data;
269 };
270
271 class GarbageStackTraceDataCopier : public StackTraceDataCopier {
272 public:
273 GarbageStackTraceDataCopier(StackTraceData **data, int size) :
274 _data(data), _size(size) {}
275 int size() const { return _size; }
276 const StackTraceData *get(uint32_t i) const { return _data[i]; }
277
278 private:
279 StackTraceData **_data;
280 int _size;
281 };
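// Design note: both copiers expose the same size()/get(i) interface, so
// copy_stack_traces() below can walk the live traces (backed by a
// GrowableArray) and the garbage traces (a raw pointer array) through a
// single code path.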
282
283 // Copies from StackTraceData to jvmtiStackTrace.
284 bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);
285
286 // Creates a deep copy of the list of StackTraceData.
287 void copy_stack_traces(const StackTraceDataCopier &copier,
288 jvmtiStackTraces *traces);
289
290 void store_garbage_trace(const StackTraceData &trace);
291
292 void free_garbage();
293 void free_storage();
294 void allocate_storage(int max_gc_storage);
295 };
296
297
298
299 // Statics for Sampler
300 double HeapMonitoring::_log_table[1 << FastLogNumBits];
301 bool HeapMonitoring::_enabled;
302 AlwaysTrueClosure HeapMonitoring::_always_true;
303 jint HeapMonitoring::_monitoring_rate;
304
305 // Cheap random number generator
306 uint64_t HeapMonitoring::_rnd;
307
308 StackTraceStorage::StackTraceStorage() :
309 _allocated_traces(NULL),
310 _recent_garbage_traces(NULL),
311 _frequent_garbage_traces(NULL),
312 _max_gc_storage(0),
313 _initialized(false) {
314 _stack_storage_lock[0] = 0;
315 }
316
317 void StackTraceStorage::free_garbage() {
318 StackTraceData **recent_garbage = NULL;
319 uint32_t recent_size = 0;
320
321 StackTraceData **frequent_garbage = NULL;
322 uint32_t frequent_size = 0;
323
324 if (_recent_garbage_traces != NULL) {
325 recent_garbage = _recent_garbage_traces->get_traces();
326 recent_size = _recent_garbage_traces->size();
327 }
328
329 if (_frequent_garbage_traces != NULL) {
330 frequent_garbage = _frequent_garbage_traces->get_traces();
331 frequent_size = _frequent_garbage_traces->size();
332 }
333
334 // Simple two-pass solution; acceptable since this happens at exit.
340
341 if (trace->references == 0) {
342 StackTraceData::free_data(trace);
343 }
344 }
345 }
346
347 // Then go through the frequent traces and free those whose only remaining reference was here.
348 for (uint32_t i = 0; i < frequent_size; i++) {
349 StackTraceData *trace = frequent_garbage[i];
350 if (trace != NULL) {
351 trace->references--;
352
353 if (trace->references == 0) {
354 StackTraceData::free_data(trace);
355 }
356 }
357 }
358 }
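// Note on the reference counts used above: a trace can sit in both the
// recent and the frequent garbage lists, so 'references' counts list
// memberships and the data is freed only when the last list drops it.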
359
360 void StackTraceStorage::free_storage() {
361 delete _allocated_traces;
362
363 free_garbage();
364 delete _recent_garbage_traces;
365 delete _frequent_garbage_traces;
366 _initialized = false;
367 }
368
369 StackTraceStorage::~StackTraceStorage() {
370 free_storage();
371 }
372
373 void StackTraceStorage::allocate_storage(int max_gc_storage) {
374 // Guard against multiple threads blocking on the lock and then getting through one by one.
375 if (_initialized) {
376 return;
377 }
378
379 _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
380 GrowableArray<StackTraceData>(128, true);
381
382 _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
383 _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
384
385 _max_gc_storage = max_gc_storage;
386 _initialized = true;
387 }
388
389 void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
390 MutexLocker mu(HeapMonitor_lock);
391 StackTraceData new_data(trace, o);
392 _stats.sample_count++;
393 _stats.stack_depth_accumulation += trace->frame_count;
394 _allocated_traces->append(new_data);
395 }
396
397 size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
398 OopClosure *f) {
399 MutexLocker mu(HeapMonitor_lock);
400 size_t count = 0;
401 if (initialized()) {
402 int len = _allocated_traces->length();
403
404 // Compact the oop traces. Moves the live oops to the beginning of the
405 // growable array, potentially overwriting the dead ones.
406 int curr_pos = 0;
407 for (int i = 0; i < len; i++) {
408 StackTraceData &trace = _allocated_traces->at(i);
409 oop value = trace.obj;
410 if (Universe::heap()->is_in_reserved(value)
411 && is_alive->do_object_b(value)) {
412 // Update the oop to point to the new object if it is still alive.
413 f->do_oop(&(trace.obj));
414
415 // Copy the old trace, if it is still live.
416 _allocated_traces->at_put(curr_pos++, trace);
417
418 count++;
419 } else {
420 // If the old trace is no longer live, add it to the list of
421 // recently collected garbage.
464 }
465
466 // See comment on get_all_stack_traces
467 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
468 GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
469 _recent_garbage_traces->size());
470 copy_stack_traces(copier, traces);
471 }
472
473 // See comment on get_all_stack_traces
474 void StackTraceStorage::get_frequent_garbage_stack_traces(
475 jvmtiStackTraces *traces) {
476 GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
477 _frequent_garbage_traces->size());
478 copy_stack_traces(copier, traces);
479 }
480
481
482 void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
483 jvmtiStackTraces *traces) {
484 MutexLocker mu(HeapMonitor_lock);
485 int len = copier.size();
486
487 // Create a new array to store the copied jvmtiStackTrace objects,
488 // + 1 for a NULL at the end.
489 jvmtiStackTrace *t =
490 NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
491 if (t == NULL) {
492 traces->stack_traces = NULL;
493 traces->trace_count = 0;
494 return;
495 }
496 // +1 to have a NULL at the end of the array.
497 memset(t, 0, (len + 1) * sizeof(*t));
498
499 // Copy the StackTraceData objects into the new array.
500 int trace_count = 0;
501 for (int i = 0; i < len; i++) {
502 const StackTraceData *stack_trace = copier.get(i);
503 if (stack_trace != NULL) {
504 jvmtiStackTrace *to = &t[trace_count];
573 traces->stack_traces = NULL;
574 }
575
576 // Invoked by the GC to clean up old stack traces and remove old arrays
577 // of instrumentation that are still lying around.
578 size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
579 OopClosure *f) {
580 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
581 return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
582 }
583
584 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
585 jint max_gc_storage) {
586 // Ignore if already enabled.
587 if (_enabled) {
588 return;
589 }
590
591 _monitoring_rate = monitoring_rate;
592
593 // Populate the lookup table for fast_log2.
594 // This approximates the log2 curve with a step function.
595 // Steps have height equal to log2 of the mid-point of the step.
596 for (int i = 0; i < (1 << FastLogNumBits); i++) {
597 double half_way = static_cast<double>(i + 0.5);
598 _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
599 }
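// Illustrative table values, assuming FastLogNumBits == 10 (the actual
// constant is defined elsewhere). Entry i holds log2 of the midpoint of
// its step:
//   _log_table[0]    = log2(1 +    0.5/1024) ~= 0.000704
//   _log_table[512]  = log2(1 +  512.5/1024) ~= 0.585430
//   _log_table[1023] = log2(1 + 1023.5/1024) ~= 0.999648
// With the table in hand, fast_log2() can reduce to extracting the
// exponent bits and one table lookup instead of calling log().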
600
601 JavaThread *t = static_cast<JavaThread *>(Thread::current());
602 _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
603 if (_rnd == 0) {
604 _rnd = 1;
605 }
606
607 StackTraceStorage::storage()->initialize(max_gc_storage);
608 _enabled = true;
609 }
610
611 void HeapMonitoring::stop_profiling() {
612 _enabled = false;
613 }
614
615 // Generates a geometric variable with the specified mean (512K by default).
616 // This is done by generating a random number between 0 and 1 and applying
617 // the inverse cumulative distribution function for an exponential.
618 // Specifically: Let m be the inverse of the sample rate, then
619 // the probability distribution function is m*exp(-mx) so the CDF is
620 // p = 1 - exp(-mx), so
621 // q = 1 - p = exp(-mx)
622 // log_e(q) = -mx
623 // -log_e(q)/m = x
624 // log_2(q) * (-log_e(2) * 1/m) = x
625 // In the code, q is actually in the range 1 to 2**26, hence the -26 below
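// Worked example (illustrative only, assuming the default 512K mean,
// i.e. _monitoring_rate == 524288): if the PRNG yields q == 2^25, then
// q / 2^26 == 0.5 and
//   log_val = log2(q) - 26 = -1
//   rate    = -1 * (-log_e(2) * 524288) + 1 ~= 363409 bytes
// which is the median of the distribution; averaged over many draws the
// sampling interval comes out near the 524288-byte mean.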
626 void HeapMonitoring::pick_next_sample(size_t *ptr) {
627 _rnd = next_random(_rnd);
634 // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
635 // under piii debug for some binaries.
636 double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
637 // Put the computed p-value through the inverse CDF of a geometric.
638 // To save ~1/20th of exec time, one could replace
639 // min(0.0, FastLog2(q) - 26) by (FastLog2(q) - 26.000705).
640 // The value 26.000705 is used rather than 26 to compensate
641 // for inaccuracies in FastLog2 which otherwise result in a
642 // negative answer.
643 double log_val = (fast_log2(q) - 26);
644 size_t rate = static_cast<size_t>(
645 (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
646 *ptr = rate;
647
648 StackTraceStorage::storage()->accumulate_sample_rate(rate);
649 }
650
651 void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
652 #if defined(X86) || defined(PPC)
653
654 if (StackTraceStorage::storage()->initialized()) {
655 assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
656 JavaThread *thread = static_cast<JavaThread *>(t);
657
658 jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
659 if (trace == NULL) {
660 return;
661 }
662
663 jvmtiFrameInfo *frames =
664 NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
665
666 if (frames == NULL) {
667 FREE_C_HEAP_OBJ(trace);
668 return;
669 }
670
671 trace->frames = frames;
672 trace->thread_id = SharedRuntime::get_java_tid(thread);
673 trace->size = byte_size;
674 trace->frame_count = 0;
679 while (!vfst.at_end() && count < MaxStackDepth) {
680 Method* m = vfst.method();
681 frames[count].location = vfst.bci();
682 frames[count].method = m->jmethod_id();
683 count++;
684
685 vfst.next();
686 }
687 trace->frame_count = count;
688 }
689
690 if (trace->frame_count > 0) {
691 // Success!
692 StackTraceStorage::storage()->add_trace(trace, o);
693 return;
694 }
695
696 // Failure!
697 FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
698 FREE_C_HEAP_OBJ(trace);
699 }
700 #else
701 Unimplemented();
702 #endif
703 }
|