/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "prims/forte.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "utilities/growableArray.hpp"

// TODO: keep the mux lock for now. Now that ASGCT is gone, the add_trace
// path and StackTraceData could use a bit of refactoring.

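// Maximum number of frames captured for each sampled allocation's stack trace.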
const int kMaxStackDepth = 64;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData instances are shared between the various lists, so free
  // the heap data explicitly here instead of in a destructor. There are
  // cases where the struct is on the stack but holds heap data that must not
  // be freed.
  static void FreeData(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiCallFrame, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// RAII class that acquires / releases a mux lock.
class MuxLocker : StackObj {
 private:
  volatile intptr_t *_lock;
  const char *_name;
 public:
  MuxLocker(volatile intptr_t *lock, const char *name) :
      _lock(lock),
      _name(name) {
    Thread::muxAcquire(lock, name);
  }
  ~MuxLocker() {
    Thread::muxRelease(_lock);
  }
};

// Fixed-size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

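  // Stores a trace in the buffer, evicting whichever entry the subclass
  // selects. Entries are reference counted because the same StackTraceData
  // can be held by both the recent and the frequent buffers; the underlying
  // trace is freed only when its last reference is dropped.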
  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::FreeData(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces.  A fixed-size ring buffer.
  StackTraceData **_garbage_traces;
};

// Keep statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0),
      _samples_seen(0) {
      }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

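    // Reservoir sampling: pick a uniformly random index in
    // [0, _samples_seen); replace only when it falls inside the buffer,
    // which happens with probability _size / _samples_seen.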
    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32)
        | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
      _garbage_traces_pos(0) {
      }

  virtual ~MostRecentGarbageTraces() {
  }

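  // Simple ring buffer behavior: always overwrite the oldest slot.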
  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace with the thread_id.
class StackTraceStorage {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void do_weak_oops(BoolObjectClosure* is_alive,
                    OopClosure *f,
                    VoidClosure* complete_gc);

  ~StackTraceStorage();
  StackTraceStorage();

  // The global storage.  Not a global static because
  // StackTraceStorage isn't available at module-loading time.
  static StackTraceStorage* storage() {
    static StackTraceStorage storage;
    return &storage;
  }

  bool IsInitialized() {
    return _initialized;
  }

  // Static method to set the storage in place at initialization.
  static void InitializeStackTraceStorage(int max_storage) {
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->InitializeStorage(max_storage);
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // Protects the traces currently sampled (below).
  volatile intptr_t _allocated_traces_lock[1];

  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Maximum size of the allocation.
  size_t _allocated_traces_size;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_storage;

  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data->length(); }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void InitializeStorage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void FreeGarbage();

};

// Statics for Sampler
double HeapMonitoring::_log_table[1 << kFastlogNumBits];

jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_storage(0),
  _initialized(false) {
  _allocated_traces_lock[0] = 0;
}

void StackTraceStorage::FreeGarbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit: walk the recent buffer and
  // free any traces that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::FreeData(trace);
      }
    }
  }

  // Then walk the frequent buffer and free those whose last reference is there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::FreeData(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  MuxLocker mu(_allocated_traces_lock, "StackTraceStorage::Destructor");
  delete _allocated_traces;

  FreeGarbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::InitializeStorage(int max_storage) {
  MuxLocker mu(_allocated_traces_lock, "StackTraceStorage::InitializeStorage");

  // In case multiple threads were blocked on the lock and then got through
  // one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

  _max_storage = max_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);

  MuxLocker mu(_allocated_traces_lock, "StackTraceStorage::add_trace");
  _allocated_traces->append(new_data);
}

void StackTraceStorage::do_weak_oops(BoolObjectClosure *is_alive,
                                     OopClosure *f,
                                     VoidClosure *complete_gc) {
  MuxLocker mu(_allocated_traces_lock, "StackTraceStorage::do_weak_oops");

  if (IsInitialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
      if ((value != NULL && Universe::heap()->is_in_reserved(value)) &&
          (is_alive == NULL || is_alive->do_object_b(value))) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
    if (complete_gc != NULL) {
      complete_gc->do_void();
    }
  }
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiCallFrame, kMaxStackDepth, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

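  // Copy the whole kMaxStackDepth frame array rather than just frame_count
  // entries; source traces are always allocated at full depth.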
  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiCallFrame) * kMaxStackDepth);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}


void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  MuxLocker mu(_allocated_traces_lock, "StackTraceStorage::copy_stack_traces");
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

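// Called from do_weak_oops while _allocated_traces_lock is held.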
void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // 'accepted' is kept on the right-hand side of the || so that store_trace
  // is always evaluated rather than short-circuited away.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *trace_info) {
  jint trace_count = trace_info->trace_count;
  jvmtiStackTrace *traces = trace_info->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = traces + i;
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, trace_info->stack_traces);
  trace_info->trace_count = 0;
  trace_info->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::do_weak_oops(
    AbstractRefProcTaskExecutor *task_executor,
    BoolObjectClosure* is_alive,
    OopClosure *f,
    VoidClosure *complete_gc) {
  if (task_executor != NULL) {
    task_executor->set_single_threaded_mode();
  }
  StackTraceStorage::storage()->do_weak_oops(is_alive, f, complete_gc);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
  _monitoring_rate = monitoring_rate;

  StackTraceStorage::InitializeStackTraceStorage(max_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << kFastlogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << kFastlogNumBits)) / log(2.0));
  }

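  // Seed the PRNG from the current thread pointer (never zero) and warm it
  // up with a few iterations before using it for sampling.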
  JavaThread *t = static_cast<JavaThread *>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
  for (int i = 0; i < 20; i++) {
    _rnd = next_random(_rnd);
  }
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  *ptr = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
}

// Called from the interpreter and C1
void HeapMonitoring::object_alloc_unsized(oopDesc* o) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  intx byte_size = o->size() << LogHeapWordSize;
  object_alloc_do_sample(thread, o, byte_size);
}

void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) {
  JavaThread *thread = static_cast<JavaThread *>(Thread::current());
  assert(o->size() << LogHeapWordSize == static_cast<long>(byte_size),
         "Object size is incorrect.");
  object_alloc_do_sample(thread, o, byte_size);
}

// Called directly by C2
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->IsInitialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiCallFrame *frames =
        NEW_C_HEAP_ARRAY(jvmtiCallFrame, kMaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->env_id = (JavaThread::current())->jni_environment();
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

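    // Walk the Java frames and record a (method id, bci) pair per frame,
    // up to kMaxStackDepth frames.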
    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < kMaxStackDepth) {
        Method* m = vfst.method();
        frames[count].bci = vfst.bci();
        frames[count].method_id = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiCallFrame, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // Not initialized yet: skip the sample.  There is only something like
    // 64K worth of allocation before the VM initializes, and skipping it
    // avoids slowing down startup.
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}