/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData objects are shared between the various trace lists, so
  // their payload is freed explicitly by this helper rather than by a
  // destructor: in some cases the struct lives on the stack while pointing
  // at heap data that must not be freed yet.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};
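
// A sketch of the intended lifecycle (recent/frequent below are hypothetical
// buffer instances, named here only for illustration): a single
// StackTraceData can be referenced by several garbage buffers at once, e.g.
//
//   StackTraceData *data = new StackTraceData(trace, obj);
//   recent->store_trace(data);    // references == 1
//   frequent->store_trace(data);  // references == 2
//
// and StackTraceData::free_data() is only reached once every buffer has
// evicted the entry and the count has dropped back to zero.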

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces, held in a fixed-size buffer. The replacement
  // policy is supplied by each subclass's select_replacement.
  StackTraceData **_garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
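//
// A sketch of the math (assuming the random index below is uniform): a new
// sample is kept with probability size/samples_seen, which is the standard
// reservoir-sampling invariant. For example, with size == 100 and
// samples_seen == 10000, an incoming trace lands in the buffer with
// probability 1%, so every sample seen so far is retained with equal
// probability and frequently allocated traces dominate by sheer volume.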
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    // While the buffer is still filling up, take the next free slot.
    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    // Compute the index in 64 bits so the result is not truncated once
    // _samples_seen exceeds the 32-bit range.
    uint64_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = static_cast<uint32_t>(random_index);
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each sampled object is stored as a trace together with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // Adds a trace to the list of traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // Called by the client to retrieve the list of live stack traces.
  // Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of recent garbage stack
  // traces. Passes a jvmtiStackTraces which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of frequent garbage stack
  // traces. Passes a jvmtiStackTraces which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
    allocate_storage(max_storage);
    memset(&_stats, 0, sizeof(_stats));
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
  void free_storage();
  void allocate_storage(int max_gc_storage);
};
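
// How the pieces above fit together (an overview, not new behavior): a
// sampled allocation is recorded via add_trace(); at each GC, weak_oops_do()
// compacts the live traces and routes dead ones through
// store_garbage_trace() into the recent/frequent buffers; a client then
// pulls copies out through the get_*_stack_traces() entry points and later
// hands them back via HeapMonitoring::release_traces().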

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_gc_storage(0),
  _initialized(false) {
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // First go through the recent buffer and free any trace whose last
  // reference is held there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent buffer and free any trace whose last
  // reference is held there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

void StackTraceStorage::free_storage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads were blocked on the lock and then got through
  // one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  StackTraceData new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->frame_count;
  _allocated_traces->append(new_data);
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                     OopClosure *f) {
  MutexLocker mu(HeapMonitorStorage_lock);
  size_t count = 0;
  if (initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if (Universe::heap()->is_in_reserved(value)
          && is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  MutexLocker mu(HeapMonitorStorage_lock);
  int len = copier.size();

  // Create a new array to hold the jvmtiStackTrace copies,
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Deep-copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // The call to store_trace is on the left of the || so that it is not
  // short-circuited away when the recent buffer already accepted the trace.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
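  // For example, if FastLogNumBits were 10 (its actual value is defined in
  // the header), the table would hold 1024 steps over [1, 2): entry i would
  // be log2(1 + (i + 0.5)/1024), i.e. entry 0 ~= 0.0007 and entry 1023
  // ~= 0.9996.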
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = i + 0.5;
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  // Seed the random state with the current thread's address; only the
  // pointer value matters here.
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(Thread::current()));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below.
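//
// A worked example under these definitions: q is uniform over (0, 2**26], so
// q = 2**25 gives log_2(q) - 26 = -1 and, at the default 512K rate, a step
// of roughly -1 * (-log_e(2) * 512K) + 1 ~= 355K bytes; averaged over q, the
// mean step works out to the configured sampling rate itself.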
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (FastLog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
  }
#else
  Unimplemented();
#endif
}