/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "utilities/growableArray.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData instances are shared between the various lists, so free
  // them by hand instead of in the destructor: there are cases where the
  // struct lives on the stack while holding heap data that must not be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // should happen; otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces. A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};
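// The reference-counting contract in one place (an illustrative sketch with
// hypothetical buffer names, not live code):
//   StackTraceData *data = new StackTraceData(trace, obj);
//   recent->store_trace(data);    // accepted: references == 1
//   frequent->store_trace(data);  // accepted: references == 2
//   // Each later eviction decrements the count; the eviction that drops it
//   // to 0 is the one that calls StackTraceData::free_data(data).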
// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    // Keep the modulo result in 64 bits: truncating it to 32 bits before the
    // comparison could wrap a large index back into the accepted range.
    uint64_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = static_cast<uint32_t>(random_index);
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store the most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};
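// A worked example of the policy above (numbers are illustrative): with
// _size == 100, the 1000th sample is admitted with probability 100/1000
// (random_index < _size) and, when admitted, lands on a uniformly random
// slot. Each slot is thus overwritten with probability 1/1000, which is the
// classic reservoir-sampling invariant: the buffer stays a uniform sample of
// everything seen so far, so frequently occurring traces dominate it in
// proportion to their frequency.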
// Each object that we profile is stored as a trace with the thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of live stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list of
  // recent garbage stack traces. Passes a jvmtiStackTraces which will get
  // mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list of
  // frequent garbage stack traces. Passes a jvmtiStackTraces which will get
  // mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed. is_alive tells
  // you if the given oop is still reachable and live.
  size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitor_lock);
    free_storage();
    allocate_storage(max_storage);
    memset(&_stats, 0, sizeof(_stats));
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // The traces currently sampled, guarded by HeapMonitor_lock.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
  void free_storage();
  void allocate_storage(int max_gc_storage);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
AlwaysTrueClosure HeapMonitoring::_always_true;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
    _allocated_traces(NULL),
    _recent_garbage_traces(NULL),
    _frequent_garbage_traces(NULL),
    _max_gc_storage(0),
    _initialized(false) {
}
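// Note on re-initialization: initialize() above frees any previous storage
// under HeapMonitor_lock before reallocating, so a second round of profiling
// simply resets the sampler. Illustrative sequence (argument values are
// hypothetical):
//   HeapMonitoring::initialize_profiling(512 * K, 200);  // start sampling
//   HeapMonitoring::stop_profiling();
//   HeapMonitoring::initialize_profiling(1 * M, 200);    // fresh buffers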
void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then do the same for the frequent traces: anything whose last reference
  // was here gets freed now.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

void StackTraceStorage::free_storage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads were blocked on the lock and then got through
  // one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  MutexLocker mu(HeapMonitor_lock);
  StackTraceData new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->frame_count;
  _allocated_traces->append(new_data);
}

size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                       OopClosure *f) {
  MutexLocker mu(HeapMonitor_lock);
  size_t count = 0;
  if (initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces. Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if (Universe::heap()->is_in_reserved(value)
          && is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out the remaining array elements. Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  return count;
}
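// To illustrate the compaction in weak_oops_do (contents are hypothetical):
// if _allocated_traces holds [A(live), B(dead), C(live)], one pass leaves
// [A, C] with length 2 and returns count == 2, while B is handed to
// store_garbage_trace below, which copies it into the garbage ring buffers.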
bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly NULL-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces.
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces.
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  MutexLocker mu(HeapMonitor_lock);
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // `accepted` is kept on the right-hand side of the || so that store_trace
  // is always evaluated rather than short-circuited away.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}
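// How a caller is expected to pair the copy and release entry points defined
// below (an illustrative sketch):
//   jvmtiStackTraces traces;
//   HeapMonitoring::get_live_traces(&traces);
//   for (jint i = 0; i < traces.trace_count; i++) {
//     // ... inspect traces.stack_traces[i] ...
//   }
//   HeapMonitoring::release_traces(&traces);  // frees each frames array and
//                                             // then the trace array itself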
// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Same delegation for the address of the initialization flag.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
                                    OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = i + 0.5;
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  // Seed the cheap PRNG from the current thread's address.
  _rnd = static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(Thread::current()));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}
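// Worked example for pick_next_sample below (assuming the default
// _monitoring_rate of 512K): if the 26 random bits come out as q = 2^25,
// then q / 2^26 = 0.5 and fast_log2(q) - 26 == -1, so
//   rate = -(-1) * log(2) * 512K + 1 ~= 363409 bytes,
// which is the median of an exponential distribution with mean 512K.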
// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below.
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes. In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (saves ~1/20th of the exec time), replace
  // min(0.0, FastLog2(q) - 26) by (FastLog2(q) - 26.000705).
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o,
                                            intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) {  // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure: no frames were collected, so release the allocations.
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
  }
#else
  Unimplemented();
#endif
}