 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceDatas are shared between various lists, so the data is
  // reference-counted and freed by hand instead of in a destructor. There
  // are also cases where the struct is on the stack but holding heap data
  // that is not to be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

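  // Stores the trace in the slot chosen by select_replacement, dropping the
  // reference held on any evicted entry (and freeing that entry if this
  // buffer held the last reference). Returns false if the subclass declined
  // the replacement.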
  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces. A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
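// (Each individual entry is thus replaced with probability 1/samples_seen:
// for example, with a 100-slot buffer the 10,000th sample is accepted with
// probability 100/10000 and, if accepted, evicts a uniformly random slot.)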
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    // While the buffer is filling up, use the next free slot.
    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    // The buffer is full: accept the new sample with probability
    // _size / _samples_seen by drawing a random slot among _samples_seen
    // and rejecting the sample if the slot falls outside the buffer.
    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index >= _size) {
      // Rejected it.
      return false;
    }

    *index = random_index;
    return true;
  }

 private:
  // The current position in the buffer while it is filling up.
  uint32_t _garbage_traces_pos;

  // The number of samples seen over the lifetime of this buffer.
  uint64_t _samples_seen;
};

// Keep the most recent garbage traces: a ring buffer that always replaces
// the oldest entry.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of all live stack traces. Passes a jvmtiStackTraces which will get
  // mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of recently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of frequently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of stack traces cached at the last full GC. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_cached_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed. is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
    memset(&_stats, 0, sizeof(_stats));
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // The traces cached at the last full GC.
  GrowableArray<StackTraceData> *_traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  reset();
}

void StackTraceStorage::reset() {
  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  _initialized = false;
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are referenced only there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those that are now
  // referenced only there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

void StackTraceStorage::free_storage() {
  if (!_initialized) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads contended on the lock and then got through
  // one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute check on initialization, in case a stop() deleted the data
  // between object_alloc_do_sample's initialization check and now.
  if (_initialized) {
    StackTraceData new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->frame_count;
    _allocated_traces->append(new_data);
  }
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                     OopClosure *f) {
  MutexLocker mu(HeapMonitorStorage_lock);
  size_t count = 0;
  if (_initialized) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();

    // Compact the oop traces. Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if (Universe::heap()->is_in_reserved(value)
          && is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements. Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is internally null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  if (!_allocated_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  if (!_recent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  if (!_frequent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces *traces) {
  if (!_traces_on_last_full_gc) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  MutexLocker mu(HeapMonitorStorage_lock);
  int len = copier.size();

  // Create a new array to store the jvmtiStackTrace objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Deep-copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is on the right-hand side of the || so that the second
  // store_trace call is never short-circuited away.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::get_cached_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_cached_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}
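
// Typical caller-side pairing for the accessors above (an illustrative
// sketch; the JVMTI entry points that would drive these calls live outside
// this file):
//   jvmtiStackTraces traces;
//   HeapMonitoring::get_live_traces(&traces);
//   // ... read traces.stack_traces[0 .. traces.trace_count) ...
//   HeapMonitoring::release_traces(&traces);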

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }
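  // For example, if FastLogNumBits is 10, the table has 1024 entries and
  // _log_table[i] == log2(1 + (i + 0.5)/1024), i.e. the log2 curve sampled
  // at the mid-point of the i-th step.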

  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);
  StackTraceStorage::storage()->stop();
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
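// Worked example, assuming the default mean of 512KB: the median draw has
// q = 2**25, so log_2(q) - 26 = -1 and x = 512KB * log_e(2), roughly 355KB,
// which is indeed the median of an exponential with mean 512KB.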
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes. In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces, used once the object has
// been GC'd.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace* trace;
  int references;

  StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {}

  StackTraceData() : trace(NULL), references(0) {}

  // StackTraceDatas are shared between various lists, so the data is
  // reference-counted and freed by hand instead of in a destructor. There
  // are also cases where the struct is on the stack but holding heap data
  // that is not to be freed.
  static void free_data(StackTraceData* data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Internal data structure representing traces with the oop, used while the
// object is live. Since this structure just passes the trace to the GC
// lists, it does not handle any freeing.
struct StackTraceDataWithOop : public StackTraceData {
  oop obj;

  StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t), obj(o) {}

  StackTraceDataWithOop() : StackTraceData(), obj(NULL) {}
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

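  // Stores the trace in the slot chosen by select_replacement, dropping the
  // reference held on any evicted entry (and freeing that entry if this
  // buffer held the last reference). Returns false if the subclass declined
  // the replacement.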
  bool store_trace(StackTraceData* trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData* old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t* index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces. A fixed-size ring buffer.
  StackTraceData** _garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
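// (Each individual entry is thus replaced with probability 1/samples_seen:
// for example, with a 100-slot buffer the 10,000th sample is accepted with
// probability 100/10000 and, if accepted, evicts a uniformly random slot.)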
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    // While the buffer is filling up, use the next free slot.
    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    // The buffer is full: accept the new sample with probability
    // _size / _samples_seen by drawing a random slot among _samples_seen
    // and rejecting the sample if the slot falls outside the buffer.
    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index >= _size) {
      // Rejected it.
      return false;
    }

    *index = random_index;
    return true;
  }

 private:
  // The current position in the buffer while it is filling up.
  uint32_t _garbage_traces_pos;

  // The number of samples seen over the lifetime of this buffer.
  uint64_t _samples_seen;
};

// Keep the most recent garbage traces: a ring buffer that always replaces
// the oldest entry.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace* trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of all live stack traces. Passes a jvmtiStackTraces which will get
  // mutated.
  void get_all_stack_traces(jvmtiStackTraces* traces);

  // The function that gets called by the client to retrieve the list
  // of recently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces* traces);

  // The function that gets called by the client to retrieve the list
  // of frequently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);

  // The function that gets called by the client to retrieve the list
  // of stack traces cached at the last full GC. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_cached_stack_traces(jvmtiStackTraces* traces);

  // Executes whenever weak references are traversed. is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
    memset(&_stats, 0, sizeof(_stats));
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceDataWithOop>* _allocated_traces;

  // The traces cached at the last full GC.
  GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces* _recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces* _frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData* get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceDataWithOop>* _data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData** data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData* get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData** _data;
    int _size;
  };

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces* traces);

  void store_garbage_trace(const StackTraceDataWithOop &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  reset();
}

void StackTraceStorage::reset() {
  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  _initialized = false;
}

void StackTraceStorage::free_garbage() {
  StackTraceData** recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData** frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are referenced only there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData* trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those that are now
  // referenced only there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData* trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

void StackTraceStorage::free_storage() {
  if (!_initialized) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads contended on the lock and then got through
  // one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute check on initialization, in case a stop() deleted the data
  // between object_alloc_do_sample's initialization check and now.
  if (_initialized) {
    StackTraceDataWithOop new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->frame_count;
    _allocated_traces->append(new_data);
  }
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                     OopClosure* f) {
  MutexLocker mu(HeapMonitorStorage_lock);
  size_t count = 0;
  if (_initialized) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();

    // Compact the oop traces. Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceDataWithOop &trace = _allocated_traces->at(i);
      oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(
          &trace.obj);
      if (is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements. Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceDataWithOop zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
                                  const StackTraceData* from) {
  const jvmtiStackTrace* src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is internally null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {
  if (!_allocated_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {
  if (!_recent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces* traces) {
  if (!_frequent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {
  if (!_traces_on_last_full_gc) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces* traces) {
  MutexLocker mu(HeapMonitorStorage_lock);
  int len = copier.size();

  // Create a new array to store the jvmtiStackTrace objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace* t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Deep-copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace* to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
  StackTraceData* new_trace = new StackTraceData(trace.trace);

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is on the right-hand side of the || so that the second
  // store_trace call is never short-circuited away.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_cached_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace* stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace* current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}
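
// Typical caller-side pairing for the accessors above (an illustrative
// sketch; the JVMTI entry points that would drive these calls live outside
// this file):
//   jvmtiStackTraces traces;
//   HeapMonitoring::get_live_traces(&traces);
//   // ... read traces.stack_traces[0 .. traces.trace_count) ...
//   HeapMonitoring::release_traces(&traces);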

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }
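  // For example, if FastLogNumBits is 10, the table has 1024 entries and
  // _log_table[i] == log2(1 + (i + 0.5)/1024), i.e. the log2 curve sampled
  // at the mid-point of the i-th step.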

  JavaThread* t = static_cast<JavaThread*>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);
  StackTraceStorage::storage()->stop();
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
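// Worked example, assuming the default mean of 512KB: the median draw has
// q = 2**25, so log_2(q) - 26 = -1 and x = 512KB * log_e(2), roughly 355KB,
// which is indeed the median of an exponential with mean 512KB.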
void HeapMonitoring::pick_next_sample(size_t* ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes. In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread* thread = static_cast<JavaThread*>(t);

    jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo* frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;