/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "prims/forte.hpp"
#include "runtime/heapMonitoring.hpp"

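// Maximum number of frames recorded for a sampled allocation.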
static const int MaxStackDepth = 64;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData objects are shared between the various lists, so free the
  // heap data explicitly here rather than in a destructor. There are cases
  // where the struct lives on the stack while holding heap data that must not
  // be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

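  // Stores the trace in the slot chosen by select_replacement(), keeping the
  // reference counts of the stored and displaced traces up to date. Returns
  // false if the subclass declined to pick a slot.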
  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces.  A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0),
      _samples_seen(0) {
      }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

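    // The buffer is full: draw a uniform index in [0, _samples_seen) and
    // accept the new sample only if it lands inside the buffer, i.e. with
    // probability _size / _samples_seen (reservoir sampling).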
    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0) {
      }

  virtual ~MostRecentGarbageTraces() {
  }

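  // Always accepts: slots are overwritten in round-robin order so the buffer
  // keeps the most recently stored traces.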
  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // Called by the client to retrieve the list of live stack traces.
  // Fills in the passed jvmtiStackTraces, which gets mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of recent garbage stack traces.
  // Fills in the passed jvmtiStackTraces, which gets mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of frequent garbage stack
  // traces. Fills in the passed jvmtiStackTraces, which gets mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

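  // Lazily creates the singleton instance on first use.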
  static StackTraceStorage* storage() {
    if (internal_storage == NULL) {
      internal_storage = new StackTraceStorage();
    }
    return internal_storage;
  }

  static void reset_stack_trace_storage() {
    delete internal_storage;
    internal_storage = NULL;
  }

  bool is_initialized() {
    return _initialized;
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  // Static method to set the storage in place at initialization.
  static void initialize_stack_trace_storage(int max_storage) {
    reset_stack_trace_storage();
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->initialize_storage(max_storage);
  }

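  // Accumulates the sample intervals handed out so the average sampling rate
  // can be reported in the heap sampling statistics.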
  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void initialize_storage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

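  // Registers a copy of the given trace in the garbage buffers once its
  // object has been collected.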
  void store_garbage_trace(const StackTraceData &trace);

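  // Frees the traces that are only referenced by the garbage buffers (called
  // from the destructor).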
  void free_garbage();
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
AlwaysTrueClosure HeapMonitoring::_always_true;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_storage(0),
  _initialized(false) {
    memset(&_stats, 0, sizeof(_stats));
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those that are now only
  // referenced there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::initialize_storage(int max_storage) {
  // In case multiple threads were blocked on initialization and then got
  // through one by one.
  if (_initialized) {
    return;
  }

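  // Growable array of sampled traces, allocated on the C heap with an
  // initial capacity of 128.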
  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

  _max_storage = max_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->frame_count;
  _allocated_traces->append(new_data);
}

size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                       OopClosure *f) {
  size_t count = 0;
  if (is_initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if ((value != NULL && Universe::heap()->is_in_reserved(value)) &&
          is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  return count;
}

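// Copies the trace header and all MaxStackDepth frame slots into *to.
// Returns false if the frame array could not be allocated.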
bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * MaxStackDepth);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}


void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // Keep `accepted` on the right of the || so that store_trace is evaluated
  // even when the trace was already accepted above.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
                                    OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Initialize and reset.
  StackTraceStorage::initialize_stack_trace_storage(max_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
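  // Entry i therefore holds log2(1 + (i + 0.5) / 2^FastLogNumBits).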
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

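  // Seed the sampling PRNG from the current thread pointer, avoiding a zero
  // seed.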
  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case, for sample_parameter = 1<<19,
  // the max possible step is 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (FastLog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

// Called from the interpreter and C1
void HeapMonitoring::object_alloc_unsized(oopDesc* o) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize);
}

void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  assert(o->size() << LogHeapWordSize == static_cast<long>(byte_size),
         "Object size is incorrect.");
  object_alloc_do_sample(thread, o, byte_size);
}

// Called directly by C2
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->is_initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

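    // Walk the Java frames of the current thread, recording up to
    // MaxStackDepth (bci, jmethodID) pairs.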
    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // There is something like 64K worth of allocation before the VM
    // initializes; those samples are skipped here purely in the interest of
    // not slowing down startup.
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}