/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "prims/forte.hpp"
#include "runtime/heapMonitoring.hpp"

// Keep muxlock for now
// Now that ASGCT is gone, a bit of refactoring in the addtrace... and
// StackTraceData

const int kMaxStackDepth = 64;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData instances are shared between several lists, so freeing is
  // handled explicitly here rather than in a destructor.  There are also
  // cases where the struct lives on the stack while pointing at heap data
  // that must not be freed.
  static void FreeData(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiCallFrame, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::FreeData(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces.  A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};

// Keeps a statistical sample of traces over the lifetime of the server.
// When the buffer is full, a random entry is replaced with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0),
      _samples_seen(0) {
      }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

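    // The buffer is full: draw a random index in [0, _samples_seen) and only
    // replace a slot when that index happens to land inside the buffer.  The
    // new sample therefore displaces an existing entry with probability
    // _size / _samples_seen, which shrinks as more samples are seen.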
    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32)
        | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0) {
      }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace together with the thread_id.
class StackTraceStorage {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // Called by the client to retrieve the list of live stack traces.
  // Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of recently collected garbage
  // stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // Called by the client to retrieve the list of frequent garbage stack
  // traces. Passes a jvmtiStackTraces which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void do_weak_oops(BoolObjectClosure* is_alive,
                    OopClosure *f,
                    VoidClosure* complete_gc);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    if (internal_storage == NULL) {
      internal_storage = new StackTraceStorage();
    }
    return internal_storage;
  }

  static void reset_stack_trace_storage() {
    delete internal_storage;
    internal_storage = NULL;
  }

  bool is_initialized() {
    return _initialized;
  }

  // Static method to set the storage in place at initialization.
  static void initialize_stack_trace_storage(int max_storage) {
    reset_stack_trace_storage();
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->initialize_storage(max_storage);
  }


  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // Protects the traces currently sampled (below).
  volatile intptr_t _allocated_traces_lock[1];

  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Maximum size of the allocation.
  size_t _allocated_traces_size;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data->length(); }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void initialize_storage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void FreeGarbage();
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << kFastlogNumBits];
bool HeapMonitoring::_enabled;

jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_storage(0),
  _initialized(false) {
  _allocated_traces_lock[0] = 0;
}

void StackTraceStorage::FreeGarbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any whose only remaining reference is here.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::FreeData(trace);
      }
    }
  }

  // Then go through the frequent traces and free those whose last reference is here.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::FreeData(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  delete _allocated_traces;

  FreeGarbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::initialize_storage(int max_storage) {
  // In case multiple threads were waiting on the lock and then got through one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

  _max_storage = max_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);
  _allocated_traces->append(new_data);
}

void StackTraceStorage::do_weak_oops(BoolObjectClosure *is_alive,
                                     OopClosure *f,
                                     VoidClosure *complete_gc) {
  if (is_initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if ((value != NULL && Universe::heap()->is_in_reserved(value)) &&
          (is_alive == NULL || is_alive->do_object_b(value))) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
    if (complete_gc != NULL) {
      complete_gc->do_void();
    }
  }
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;
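  // The struct assignment above copies the metadata (thread id, size, frame
  // count) but shares the frames pointer; a fresh frames array is allocated
  // and copied below so the caller owns an independent copy.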

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiCallFrame, kMaxStackDepth, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiCallFrame) * kMaxStackDepth);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}


void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;
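  // This is a shallow copy: the new StackTraceData shares the underlying
  // jvmtiStackTrace (and its frames) with the entry that just died in
  // _allocated_traces; the garbage buffers manage its lifetime through the
  // references count and StackTraceData::FreeData.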

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is kept on the right-hand side of the || so that store_trace is
  // always evaluated (no short-circuiting).
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *trace_info) {
  jint trace_count = trace_info->trace_count;
  jvmtiStackTrace *traces = trace_info->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = traces + i;
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, trace_info->stack_traces);
  trace_info->trace_count = 0;
  trace_info->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::do_weak_oops(
    AbstractRefProcTaskExecutor *task_executor,
    BoolObjectClosure* is_alive,
    OopClosure *f,
    VoidClosure *complete_gc) {
  if (task_executor != NULL) {
    task_executor->set_single_threaded_mode();
  }
  StackTraceStorage::storage()->do_weak_oops(is_alive, f, complete_gc);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Initialize and reset.
  StackTraceStorage::initialize_stack_trace_storage(max_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << kFastlogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << kFastlogNumBits)) / log(2.0));
  }

  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
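  // Mix the seed (derived from the current thread pointer) a few times so the
  // first samples do not depend directly on the raw pointer bits.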
  for (int i = 0; i < 20; i++) {
    _rnd = next_random(_rnd);
  }

  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  *ptr = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
}

// Called from the interpreter and C1
void HeapMonitoring::object_alloc_unsized(oopDesc* o) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  // No byte size is passed in here, so derive it from the object itself.
  object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize);
}

void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  object_alloc_do_sample(thread, o, byte_size);
}

// Called directly by C2
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->is_initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiCallFrame *frames =
        NEW_C_HEAP_ARRAY(jvmtiCallFrame, kMaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->env_id = (JavaThread::current())->jni_environment();
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

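    // Walk the Java frames of the sampled thread, recording up to
    // kMaxStackDepth (method id, bci) pairs into the newly allocated frames
    // array.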
    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < kMaxStackDepth) {
        Method* m = vfst.method();
        frames[count].bci = vfst.bci();
        frames[count].method_id = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // There is something like 64K worth of allocation before the VM
    // initializes.  This is just in the interests of not slowing down
    // startup.
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}