/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"

const int MaxStackDepth = 1024;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData objects are shared between the various lists, so their
  // lifetime is managed by hand via the reference count rather than in a
  // destructor. There are also cases where the struct is on the stack but
  // holds heap data that must not be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // should happen; otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces: a fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};
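
// Note on ownership: a single StackTraceData can be referenced by both the
// "most recent" and the "frequent" buffers at the same time. Illustrative
// sketch (variable names are hypothetical):
//
//   StackTraceData *data = new StackTraceData(trace, obj);
//   recent_buffer->store_trace(data);    // data->references == 1
//   frequent_buffer->store_trace(data);  // data->references == 2
//
// Each buffer drops its reference when its slot gets recycled; the trace is
// only freed (via StackTraceData::free_data) once the count reaches zero.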

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, a new trace replaces any given entry with
// probability 1/_samples_seen and is dropped otherwise (reservoir sampling).
// This strategy tends towards preserving the most frequently occurring
// traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store the most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // Adds a trace to the list of traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // Called by the client to retrieve the list of all sampled live stack
  // traces. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of the most recently
  // collected garbage stack traces. Passes a jvmtiStackTraces which will
  // get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of the most frequently
  // collected garbage stack traces. Passes a jvmtiStackTraces which will
  // get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed. is_alive tells
  // you if the given oop is still reachable and live.
  size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    if (internal_storage == NULL) {
      internal_storage = new StackTraceStorage();
    }
    return internal_storage;
  }

  static void reset_stack_trace_storage() {
    delete internal_storage;
    internal_storage = NULL;
  }

  bool is_initialized() {
    return _initialized;
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }
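
  // A sketch of the expected call sequence (all methods below are declared
  // in this class; the JVMTI layer that drives them lives elsewhere):
  //
  //   StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);
  //   StackTraceStorage::storage()->add_trace(trace, obj);   // per sampled alloc
  //   StackTraceStorage::storage()->get_all_stack_traces(&traces);
  //   StackTraceStorage::reset_stack_trace_storage();        // teardown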

  // Static method to set the storage in place at initialization.
  static void initialize_stack_trace_storage(int max_storage) {
    reset_stack_trace_storage();
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->initialize_storage(max_storage);
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void initialize_storage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for the sampler.
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
AlwaysTrueClosure HeapMonitoring::_always_true;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator.
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
    _allocated_traces(NULL),
    _recent_garbage_traces(NULL),
    _frequent_garbage_traces(NULL),
    _max_gc_storage(0),
    _initialized(false) {
  memset(&_stats, 0, sizeof(_stats));
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any whose only reference is there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those whose last reference
  // is there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::initialize_storage(int max_gc_storage) {
  // Guards against the case where multiple threads were blocked on
  // initialization and then get through one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->frame_count;
  _allocated_traces->append(new_data);
}

size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                       OopClosure *f) {
  size_t count = 0;
  if (is_initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces. Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
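    // For example, with live entries (L) and dead entries (d):
    //   before: [L0, d, L1, d, d, L2]           (len == 6)
    //   after:  [L0, L1, L2, 0, 0, 0] -> trunc_to(3)
    // Each dead entry is handed to store_garbage_trace below.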
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if (Universe::heap()->is_in_reserved(value)
          && is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out the remaining array elements. Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  return count;
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly NULL-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces.
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces.
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));
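
  // In the copy loop below, entries whose deep copy fails (allocation
  // failure in deep_copy) are skipped, so trace_count can end up smaller
  // than len; the zeroed tail from the memset above keeps the array
  // NULL-terminated either way.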
  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is on the right-hand side of the || so that store_trace is
  // always evaluated.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // Neither buffer wanted to keep it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
                                    OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Initialize and reset.
  StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i) + 0.5;
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx), so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below.
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes. In this case,
  // for sample_parameter = 1<<19, the max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NaN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (saves ~1/20th of the exec time), replace
  // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705).
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}
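
// Worked example for pick_next_sample (illustrative values, not from the
// code): with _monitoring_rate == 512 * 1024 and a draw of q == (1 << 25),
// i.e. q / 2^26 == 0.5:
//   log_val = fast_log2(q) - 26 ~= -1
//   rate    = min(0.0, -1) * (-log(2.0) * 524288) + 1
//           ~= -log_e(0.5) * 524288 + 1 ~= 363410 bytes
// which is the median of an exponential distribution with mean 512K, exactly
// as the inverse-CDF construction above prescribes.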

void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o,
                                            intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->is_initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) {  // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }
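
    // trace->frame_count is still 0 here if the thread had no walkable Java
    // frames; such an empty trace counts as a failure and is freed below
    // rather than stored.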
    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // There is something like 64K worth of allocation before the VM
    // initializes. This is just in the interests of not slowing down
    // startup.
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}
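
// A sketch of how a caller on the agent side might drive the entry points
// above (only the HeapMonitoring:: calls are defined in this file; the
// surrounding flow is hypothetical):
//
//   HeapMonitoring::initialize_profiling(512 * 1024, 200);
//   // ... sampled allocations reach pick_next_sample / object_alloc_do_sample
//   jvmtiStackTraces traces;
//   HeapMonitoring::get_live_traces(&traces);
//   // consume traces.stack_traces[0 .. traces.trace_count - 1]
//   HeapMonitoring::release_traces(&traces);
//   HeapMonitoring::stop_profiling();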