/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 64;

// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace *trace;
  oop obj;
  int references;

  StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}

  StackTraceData() : trace(NULL), obj(NULL), references(0) {}

  // StackTraceData instances are shared between the various lists, so their
  // deallocation is handled explicitly here rather than in a destructor.
  // There are cases where the struct is on the stack but points to heap data
  // that must not be freed.
  static void free_data(StackTraceData *data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData *trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData *old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t *index) = 0;

  const uint32_t _size;

 private:
  // The currently stored garbage traces, in a fixed-size buffer.
  StackTraceData **_garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, replace a random entry with probability
// 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
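// (Each incoming sample is kept with probability size / samples_seen, so at
// any point every sample seen so far has an equal chance of occupying a
// buffer slot; stack traces that produce many garbage samples are therefore
// more likely to be represented.)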
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace together with the
// allocating thread's id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiStackTrace *trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of live stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of recently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces *traces);

  // The function that gets called by the client to retrieve the list
  // of frequently collected garbage stack traces. Passes a jvmtiStackTraces
  // which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    if (internal_storage == NULL) {
      internal_storage = new StackTraceStorage();
    }
    return internal_storage;
  }

  static void reset_stack_trace_storage() {
    delete internal_storage;
    internal_storage = NULL;
  }

  bool is_initialized() {
    return _initialized;
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  // Static method to set the storage in place at initialization.
  static void initialize_stack_trace_storage(int max_storage) {
    reset_stack_trace_storage();
    StackTraceStorage *storage = StackTraceStorage::storage();
    storage->initialize_storage(max_storage);
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() { return _initialized; }
  volatile bool *initialized_address() { return &_initialized; }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceData> *_allocated_traces;

  // Recent garbage traces.
  MostRecentGarbageTraces *_recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces *_frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_storage;

  static StackTraceStorage* internal_storage;
  volatile bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData *get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceData> *_data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData **data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData *get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData **_data;
    int _size;
  };

  // Instance initialization.
  void initialize_storage(int max_storage);

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces *traces);

  void store_garbage_trace(const StackTraceData &trace);

  void free_garbage();
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
AlwaysTrueClosure HeapMonitoring::_always_true;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() :
  _allocated_traces(NULL),
  _recent_garbage_traces(NULL),
  _frequent_garbage_traces(NULL),
  _max_storage(0),
  _initialized(false) {
  memset(&_stats, 0, sizeof(_stats));
}

void StackTraceStorage::free_garbage() {
  StackTraceData **recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData **frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData *trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then do the same for the frequent traces, freeing those whose last
  // reference was held there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData *trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

StackTraceStorage::~StackTraceStorage() {
  delete _allocated_traces;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;
  _initialized = false;
}

void StackTraceStorage::initialize_storage(int max_storage) {
  // Protects against multiple threads queuing up on the lock and then getting
  // through one by one: only the first one initializes the storage.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceData>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

  _max_storage = max_storage;
  _initialized = true;
}

void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
  StackTraceData new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->frame_count;
  _allocated_traces->append(new_data);
}

size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
                                       OopClosure *f) {
  size_t count = 0;
  if (is_initialized()) {
    int len = _allocated_traces->length();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    int curr_pos = 0;
    for (int i = 0; i < len; i++) {
      StackTraceData &trace = _allocated_traces->at(i);
      oop value = trace.obj;
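      // Note: is_in_reserved() returns false for a NULL oop, so no separate
      // NULL check is needed here.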
      if (Universe::heap()->is_in_reserved(value)
          && is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(curr_pos++, trace);

        count++;
      } else {
        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceData zero_trace;
    for (int i = curr_pos; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(curr_pos);
  }

  return count;
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
                                  const StackTraceData *from) {
  const jvmtiStackTrace *src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

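  // The frames buffer in the source trace was allocated with MaxStackDepth
  // entries (see object_alloc_do_sample), so copying the full fixed-size
  // array is safe even though only frame_count entries are meaningful.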
  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * MaxStackDepth);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces *traces) {
  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}


void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces *traces) {
  int len = copier.size();

  // Create a new array to store the stack traces.
  // + 1 for a NULL at the end.
  jvmtiStackTrace *t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Deep-copy each stack trace into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData *stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace *to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) {
  StackTraceData *new_trace = new StackTraceData();
  *new_trace = trace;

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // "accepted" is on the right-hand side of the || so that short-circuit
  // evaluation cannot skip the call to store_trace.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // Neither buffer kept a reference to the trace, so free it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

// Delegate the initialization question to the underlying storage system.
bool HeapMonitoring::initialized() {
  return StackTraceStorage::storage()->initialized();
}

// Delegate the initialization question to the underlying storage system.
bool *HeapMonitoring::initialized_address() {
  return
      const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace *stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace *current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
                                    OopClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Initialize and reset the storage.
  StackTraceStorage::initialize_stack_trace_storage(max_storage);

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i) + 0.5;
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  Thread *t = Thread::current();
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
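//
// Worked example (illustrative): with _monitoring_rate = 512K, if the top 26
// random bits give q = 2**25 (i.e. q / 2**26 = 0.5), then log_val = -1 and
// rate is about log_e(2) * 512K, i.e. roughly 363K bytes.  Since
// -log_e(q / 2**26) averages to 1 for a uniform q, the mean sampling
// interval works out to ~512K bytes, as intended.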
void HeapMonitoring::pick_next_sample(size_t *ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t prng_mod_power = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (prng_mod_power - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (FastLog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
  if (StackTraceStorage::storage()->is_initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread *thread = static_cast<JavaThread *>(t);

    jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo *frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) { // just to be safe
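      // The second argument to vframeStream is stop_at_java_call_stub, so the
      // walk stops once it reaches the Java call stub.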
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
    return;
  } else {
    // There is something like 64K worth of allocation before the VM
    // initializes; ignoring those samples is simply in the interest of not
    // slowing down startup.
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  }
#else
  Unimplemented();
#endif
}