/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces, used when object has been GC'd.
struct StackTraceData : CHeapObj<mtInternal> {
  jvmtiStackTrace* trace;
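  // Simple (non-atomic) reference count: GarbageTracesBuffer::store_trace
  // increments it when a buffer takes the trace, eviction and
  // StackTraceStorage::free_garbage decrement it, and free_data is called
  // once it drops to zero.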
  int references;

  StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {}

  StackTraceData() : trace(NULL), references(0) {}

  // StackTraceData instances are shared between several lists, so freeing is
  // handled explicitly here instead of in a destructor. There are also cases
  // where the struct lives on the stack but holds heap data that must not be
  // freed.
  static void free_data(StackTraceData* data) {
    if (data->trace != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
      FREE_C_HEAP_OBJ(data->trace);
    }
    delete data;
  }
};

// Internal data structure representing traces with the oop, used while object
// is live. Since this structure just passes the trace to the GC lists, it does
// not handle any freeing.
struct StackTraceDataWithOop : public StackTraceData {
  oop obj;

  StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
    store_oop(o);
  }

  StackTraceDataWithOop() : StackTraceData(), obj(NULL) {}

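  // Note: the sampled oop is held at phantom strength and read with
  // AS_NO_KEEPALIVE, so keeping a sample around does not keep the object
  // alive; liveness is re-evaluated in StackTraceStorage::weak_oops_do.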
  oop load_oop() {
    return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&obj);
  }

  void store_oop(oop value) {
    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&obj, value);
  }

  void clear_oop() {
    store_oop(reinterpret_cast<oop>(NULL));
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData* trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData* old_data = _garbage_traces[index];

    if (old_data != NULL) {
      old_data->references--;

      if (old_data->references == 0) {
        StackTraceData::free_data(old_data);
      }
    }

    trace->references++;
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t* index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces, stored in a fixed-size buffer.
  StackTraceData** _garbage_traces;
};

// Keeps a statistical sample of traces over the lifetime of the server.
// Once the buffer is full, an incoming sample replaces a uniformly random
// entry with probability size/samples_seen, so any given entry is overwritten
// with probability 1/samples_seen. This strategy tends towards preserving the
// most frequently occurring traces over time.
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

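    // Reservoir-style replacement: accept the new sample with probability
    // _size / _samples_seen and, if accepted, evict a uniformly random slot,
    // so each existing entry is overwritten with probability 1 / _samples_seen.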
    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos = (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each sampled object is stored as a trace together with the allocating
// thread's id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // Adds a trace to the list of traces we are maintaining.
  void add_trace(jvmtiStackTrace* trace, oop o);

  // Called by the client to retrieve the list of stack traces for live
  // sampled objects. Passes a jvmtiStackTraces which will get mutated.
  void get_all_stack_traces(jvmtiStackTraces* traces);

  // Called by the client to retrieve the list of stack traces for recently
  // garbage-collected sampled objects. Passes a jvmtiStackTraces which will
  // get mutated.
  void get_garbage_stack_traces(jvmtiStackTraces* traces);

  // Called by the client to retrieve the list of frequently occurring garbage
  // stack traces. Passes a jvmtiStackTraces which will get mutated.
  void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);

  // Called by the client to retrieve the stack traces cached at the last
  // full GC. Passes a jvmtiStackTraces which will get mutated.
  void get_cached_stack_traces(jvmtiStackTraces* traces);

  // Executed whenever weak references are traversed. is_alive tells
  // whether the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
    memset(&_stats, 0, sizeof(_stats));
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

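  // Acquire-load paired with the release_store in allocate_storage() and
  // reset(): a reader that observes _initialized == 1 also observes the
  // storage pointers written before it.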
  bool initialized() {
    return OrderAccess::load_acquire(&_initialized) != 0;
  }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceDataWithOop>* _allocated_traces;

  // The traces that were live as of the last full GC.
  GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces* _recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces* _frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  int _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData* get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceDataWithOop>* _data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData** data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData* get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData** _data;
    int _size;
  };

  // Copies from StackTraceData to jvmtiStackTrace.
  bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(const StackTraceDataCopier &copier,
                         jvmtiStackTraces* traces);

  void store_garbage_trace(const StackTraceDataWithOop &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
bool HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  reset();
}

void StackTraceStorage::reset() {
  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  OrderAccess::release_store(&_initialized, 0);
}

void StackTraceStorage::free_garbage() {
  StackTraceData** recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData** frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple approach, since this happens at exit:
  // go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData* trace = recent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }

  // Then go through the frequent traces and free those that are now only
  // referenced there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData* trace = frequent_garbage[i];
    if (trace != NULL) {
      trace->references--;

      if (trace->references == 0) {
        StackTraceData::free_data(trace);
      }
    }
  }
}

void StackTraceStorage::free_storage() {
  if (!initialized()) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads were blocked on the lock and then got through
  // one by one.
  if (initialized()) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  OrderAccess::release_store(&_initialized, 1);
}

void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute initialization check, in case a stop() deleted the storage
  // between object_alloc_do_sample's initialization check and now.
  if (initialized()) {
    StackTraceDataWithOop new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->frame_count;
    _allocated_traces->append(new_data);
  }
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                     OopClosure* f) {
  int count = 0;
  if (initialized()) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    for (int i = 0; i < len; i++) {
      StackTraceDataWithOop &trace = _allocated_traces->at(i);
      oop value = trace.load_oop();
      if (is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(&(trace.obj));

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(count++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);
      } else {
        trace.clear_oop();

        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceDataWithOop zero_trace;
    for (int i = count; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(count);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (%d)", count);
}

bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
                                  const StackTraceData* from) {
  const jvmtiStackTrace* src = from->trace;
  *to = *src;

  to->frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

  if (to->frames == NULL) {
    return false;
  }

  memcpy(to->frames,
         src->frames,
         sizeof(jvmtiFrameInfo) * src->frame_count);
  return true;
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {
  if (!_allocated_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {
  if (!_recent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    jvmtiStackTraces* traces) {
  if (!_frequent_garbage_traces) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(copier, traces);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {
  if (!_traces_on_last_full_gc) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(copier, traces);
}

void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
                                          jvmtiStackTraces* traces) {
  MutexLocker mu(HeapMonitorStorage_lock);
  int len = copier.size();

  // Create a new array to store the StackTraceData objects.
  // + 1 for a NULL at the end.
  jvmtiStackTrace* t =
      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
  if (t == NULL) {
    traces->stack_traces = NULL;
    traces->trace_count = 0;
    return;
  }
  // +1 to have a NULL at the end of the array.
  memset(t, 0, (len + 1) * sizeof(*t));

  // Copy the StackTraceData objects into the new array.
  int trace_count = 0;
  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);
    if (stack_trace != NULL) {
      jvmtiStackTrace* to = &t[trace_count];
      if (!deep_copy(to, stack_trace)) {
        continue;
      }
      trace_count++;
    }
  }

  traces->stack_traces = t;
  traces->trace_count = trace_count;
}

void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
  StackTraceData* new_trace = new StackTraceData(trace.trace);

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is deliberately on the right-hand side of the || so that
  // store_trace is always evaluated (no short-circuiting).
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_all_stack_traces(traces);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
}

void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
}

void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
  StackTraceStorage::storage()->get_cached_stack_traces(traces);
}

void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
  jint trace_count = traces->trace_count;
  jvmtiStackTrace* stack_traces = traces->stack_traces;

  for (jint i = 0; i < trace_count; i++) {
    jvmtiStackTrace* current_trace = stack_traces + i;
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
  }

  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
  traces->trace_count = 0;
  traces->stack_traces = NULL;
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (_enabled) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }
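  // fast_log2 (declared in heapMonitoring.hpp) is assumed to index this table
  // with the top FastLogNumBits bits of the argument's mantissa and add the
  // exponent.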

  JavaThread* t = static_cast<JavaThread*>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  _enabled = true;
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);
  StackTraceStorage::storage()->stop();
  _enabled = false;
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
void HeapMonitoring::pick_next_sample(size_t* ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case, for sample_parameter = 1<<19,
  // the max possible step is 9448372 bytes (24 bits).)
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
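  // Net effect: rate == -log_e(u) * _monitoring_rate + 1 for u uniform in
  // (0, 1], i.e. an exponential variate with mean ~_monitoring_rate bytes.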
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread* thread = static_cast<JavaThread*>(t);

    jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiFrameInfo* frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }

    trace->frames = frames;
    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;
    trace->frame_count = 0;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      trace->frame_count = count;
    }

    if (trace->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
    FREE_C_HEAP_OBJ(trace);
  }
}