/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "prims/forte.hpp"
#include "runtime/heapMonitoring.hpp"

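// Maximum number of frames recorded per sampled allocation; the same bound
// is used when deep-copying a trace's frames out to the client.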
static const int max_stack_depth = 64;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
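  // Number of garbage-trace buffers currently holding a pointer to this
  // entry; the shared heap data is freed once this count drops back to zero.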
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData instances are shared between several lists, so the heap
  // data is freed explicitly here rather than in a destructor. There are
  // also cases where the struct lives on the stack while holding heap data
  // that must not be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiCallFrame, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces.  A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};
// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, each new sample replaces a given entry with
// probability 1/samples_seen (reservoir sampling). This strategy tends
// towards preserving the most frequently occurring traces over time.
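// For example, with a buffer of 100 entries, the 1000th sample seen replaces
// some existing entry with probability 100/1000, i.e. a given slot with
// probability 1/1000.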
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

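  // Always accepts the trace: overwrite the slot at the current position and
  // advance it modulo _size, cycling through the most recent garbage traces.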
  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with its thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of live stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of recent garbage stack traces. Passes a jvmtiStackTraces which will
  // get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of frequent garbage stack traces. Passes a jvmtiStackTraces which will
  // get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive,
                    OopClosure *f,
                    VoidClosure* complete_gc);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    if (internal_storage == NULL) {
      internal_storage = new StackTraceStorage();
    }
    return internal_storage;
  }

  static void reset_stack_trace_storage() {
    delete internal_storage;
    internal_storage = NULL;
  }

  bool is_initialized() {
    return _initialized;
  }

  // Static method to set the storage in place at initialization.
  static void initialize_stack_trace_storage(int max_storage) {
    reset_stack_trace_storage();
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->initialize_storage(max_storage);
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void initialize_storage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << _fast_log_num_bits];
bool HeapMonitoring::_enabled;

jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_storage(0),
  _initialized(false) {
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those that are now only
  // referenced there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::initialize_storage(int max_storage) {
  // In case multiple threads were blocked and then got through one by one:
  // only the first one initializes the storage.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

  _max_storage = max_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);
  _allocated_traces->append(new_data);
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                     OopClosure *f,
                                     VoidClosure *complete_gc) {
  if (is_initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
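    // curr_pos is the next free slot for a live trace; dead traces are not
    // copied forward but handed to the garbage buffers instead.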
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if ((value != NULL && Universe::heap()->is_in_reserved(value)) &&
          (is_alive == NULL || is_alive->do_object_b(value))) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
    if (complete_gc != NULL) {
      complete_gc->do_void();
    }
  }
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiCallFrame, max_stack_depth, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiCallFrame) * max_stack_depth);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // "accepted" is on the right-hand side of the || so that the store_trace
  // call is never short-circuited away.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *trace_info) {
  jint trace_count = trace_info->trace_count;
  jvmtiStackTrace *traces = trace_info->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = traces + i;
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, trace_info->stack_traces);
  trace_info->trace_count = 0;
  trace_info->stack_traces = NULL;
}
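// Note: release_traces frees exactly what copy_stack_traces allocated for a
// get_*_traces call: each trace's frames array, then the stack_traces array
// itself.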

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(
    AbstractRefProcTaskExecutor *task_executor,
    BoolObjectClosure* is_alive,
    OopClosure *f,
    VoidClosure *complete_gc) {
  if (task_executor != NULL) {
    task_executor->set_single_threaded_mode();
  }
  StackTraceStorage::storage()->weak_oops_do(is_alive, f, complete_gc);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Initialize and reset.
  StackTraceStorage::initialize_stack_trace_storage(max_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
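  // Entry i therefore holds log2(1.0 + (i + 0.5) / 2^_fast_log_num_bits),
  // i.e. the log2 of the midpoint of the i-th step above 1.0.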
  for (int i = 0; i < (1 << _fast_log_num_bits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << _fast_log_num_bits)) / log(2.0));
  }

  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
  for (int i = 0; i < 20; i++) {
    _rnd = next_random(_rnd);
  }

  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
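// For example, with the default monitoring rate of 512K, consecutive samples
// are on average 512K bytes of allocation apart, with individual gaps drawn
// from the geometric distribution around that mean.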
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case, for sample_parameter = 1<<19,
  // the max possible step is 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  *ptr = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
}

// Called from the interpreter and C1
void HeapMonitoring::object_alloc_unsized(oopDesc* o) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize);
}

void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  assert(o->size() << LogHeapWordSize == static_cast<long>(byte_size),
         "Object size is incorrect.");
  object_alloc_do_sample(thread, o, byte_size);
}

// Called directly by C2
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->is_initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiCallFrame *frames =
        NEW_C_HEAP_ARRAY(jvmtiCallFrame, max_stack_depth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->env_id = (JavaThread::current())->jni_environment();
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

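    // Walk the Java stack, recording at most max_stack_depth frames; each
    // frame captures the method's jmethodID and bytecode index (bci).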
    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < max_stack_depth) {
        Method* m = vfst.method();
        frames[count].bci = vfst.bci();
        frames[count].method_id = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // The storage is not initialized yet: skip sampling in the interest of
    // not slowing down startup (there is something like 64K worth of
    // allocation before the VM initializes).
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}