1 /*
   2  * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "runtime/heapMonitoring.hpp"
  30 #include "runtime/orderAccess.inline.hpp"
  31 #include "runtime/vframe.hpp"
  32 
  33 static const int MaxStackDepth = 1024;
  34 
// Internal data structure representing traces, used after the object has been GC'd.
  36 class StackTraceData : public CHeapObj<mtInternal> {
  37  private:
  38   jvmtiStackTrace* _trace;
  39   int _references;
  40 
  41  public:
  42   StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {}
  43 
  44   void increment_reference_count() {
  45     _references++;
  46   }
  47 
  48   jvmtiStackTrace* get_trace() const {
  49     return _trace;
  50   }
  51 
  52   static void unreference_and_free(StackTraceData* data) {
  53     if (!data) {
  54       return;
  55     }
  56 
  57     data->_references--;
  58     if (data->_references == 0) {
  59       if (data->_trace != NULL) {
  60         FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames);
  61         FREE_C_HEAP_OBJ(data->_trace);
  62       }
  63       delete data;
  64     }
  65   }
  66 };
  67 
// Internal data structure representing traces with the oop, used while the
// object is live. Since this structure just passes the trace to the GC lists,
// it does not handle any freeing.
  71 class StackTraceDataWithOop : public StackTraceData {
  72  private:
  73   oop _obj;
  74 
  75  public:
  76   StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
  77     store_oop(o);
  78   }
  79 
  80   StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  81   }
  82 
  83   oop load_oop() {
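    // Phantom, no-keepalive load: reading the sampled oop must not keep the
    // object alive.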
  84     return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_obj);
  85   }
  86 
  87   oop* get_oop_addr() {
  88     return &_obj;
  89   }
  90 
  91   void store_oop(oop value) {
  92     RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  93   }
  94 
  95   void clear_oop() {
  96     store_oop(reinterpret_cast<oop>(NULL));
  97   }
  98 };
  99 
 100 // Fixed size buffer for holding garbage traces.
 101 class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 102  public:
 103   GarbageTracesBuffer(uint32_t size) : _size(size) {
 104     _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
 105                                        size,
 106                                        mtInternal);
 107     memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
 108   }
 109 
 110   virtual ~GarbageTracesBuffer() {
 111     FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
 112   }
 113 
 114   StackTraceData** get_traces() const {
 115     return _garbage_traces;
 116   }
 117 
 118   bool store_trace(StackTraceData* trace) {
 119     uint32_t index;
 120     if (!select_replacement(&index)) {
 121       return false;
 122     }
 123 
 124     StackTraceData* old_data = _garbage_traces[index];
 125     StackTraceData::unreference_and_free(old_data);
 126 
 127     trace->increment_reference_count();
 128     _garbage_traces[index] = trace;
 129     return true;
 130   }
 131 
 132   uint32_t size() const {
 133     return _size;
 134   }
 135 
 136  protected:
 137   // Subclasses select the trace to replace. Returns false if no replacement
 138   // is to happen, otherwise stores the index of the trace to replace in
 139   // *index.
 140   virtual bool select_replacement(uint32_t* index) = 0;
 141 
 142   const uint32_t _size;
 143 
 144  private:
  // The current garbage traces.  A fixed-size buffer; the subclass chooses
  // the replacement policy.
 146   StackTraceData** _garbage_traces;
 147 };
 148 
 149 // Keep statistical sample of traces over the lifetime of the server.
 150 // When the buffer is full, replace a random entry with probability
 151 // 1/samples_seen. This strategy tends towards preserving the most frequently
// occurring traces over time.
 153 class FrequentGarbageTraces : public GarbageTracesBuffer {
 154  public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }
 160 
 161   virtual ~FrequentGarbageTraces() {
 162   }
 163 
 164   virtual bool select_replacement(uint32_t* index) {
 165     ++_samples_seen;
 166 
 167     if (_garbage_traces_pos < _size) {
 168       *index = _garbage_traces_pos++;
 169       return true;
 170     }
 171 
 172     uint64_t random_uint64 =
 173         (static_cast<uint64_t>(::random()) << 32) | ::random();
 174 
 175     uint32_t random_index = random_uint64 % _samples_seen;
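    // Reservoir sampling: the incoming trace replaces a uniformly random slot
    // with probability _size / _samples_seen, so every trace seen so far is
    // equally likely to remain resident in the buffer.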
 176     if (random_index < _size) {
 177       *index = random_index;
 178       return true;
 179     }
 180 
 181     return false;
 182   }
 183 
 184  private:
 185   // The current position in the buffer as we initially fill it.
 186   uint32_t _garbage_traces_pos;
 187 
 188   uint64_t _samples_seen;
 189 };
 190 
 191 // Store most recent garbage traces.
 192 class MostRecentGarbageTraces : public GarbageTracesBuffer {
 193  public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }
 198 
 199   virtual ~MostRecentGarbageTraces() {
 200   }
 201 
 202   virtual bool select_replacement(uint32_t* index) {
 203     *index = _garbage_traces_pos;
 204 
 205     _garbage_traces_pos =
 206         (_garbage_traces_pos + 1) % _size;
 207 
 208     return true;
 209   }
 210 
 211  private:
 212   // The current position in the buffer.
 213   uint32_t _garbage_traces_pos;
 214 };
 215 
// Each object that we profile is stored as a trace together with its thread_id.
 217 class StackTraceStorage : public CHeapObj<mtInternal> {
 218  public:
 219   // The function that gets called to add a trace to the list of
 220   // traces we are maintaining.
 221   void add_trace(jvmtiStackTrace* trace, oop o);
 222 
  // The function that gets called by the client to retrieve the list of live
  // stack traces. Passes a jvmtiStackTraces which will get mutated.
 225   void get_all_stack_traces(jvmtiStackTraces* traces);
 226 
  // The function that gets called by the client to retrieve the list of recent
  // garbage stack traces. Passes a jvmtiStackTraces which will get mutated.
 229   void get_garbage_stack_traces(jvmtiStackTraces* traces);
 230 
  // The function that gets called by the client to retrieve the list of frequent
  // garbage stack traces. Passes a jvmtiStackTraces which will get mutated.
 233   void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);
 234 
  // The function that gets called by the client to retrieve the stack traces
  // cached at the last full GC. Passes a jvmtiStackTraces which will get mutated.
 237   void get_cached_stack_traces(jvmtiStackTraces* traces);
 238 
 239   // Executes whenever weak references are traversed.  is_alive tells
 240   // you if the given oop is still reachable and live.
 241   void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 242 
 243   ~StackTraceStorage();
 244   StackTraceStorage();
 245 
 246   static StackTraceStorage* storage() {
 247     static StackTraceStorage internal_storage;
 248     return &internal_storage;
 249   }
 250 
 251   void initialize(int max_storage) {
 252     MutexLocker mu(HeapMonitorStorage_lock);
 253     allocate_storage(max_storage);
 254   }
 255 
 256   void stop() {
 257     MutexLocker mu(HeapMonitorStorage_lock);
 258     free_storage();
 259   }
 260 
 261   const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
 262     MutexLocker mu(HeapMonitorStorage_lock);
 263     return _stats;
 264   }
 265 
 266   void accumulate_sample_rate(size_t rate) {
 267     MutexLocker mu(HeapMonitorStorage_lock);
 268     _stats.sample_rate_accumulation += rate;
 269     _stats.sample_rate_count++;
 270   }
 271 
 272   bool initialized() {
 273     return OrderAccess::load_acquire(&_initialized) != 0;
 274   }
 275 
 276  private:
 277   // The traces currently sampled.
 278   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 279 
  // The traces cached at the last full GC.
 281   GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;
 282 
 283   // Recent garbage traces.
 284   MostRecentGarbageTraces* _recent_garbage_traces;
 285 
 286   // Frequent garbage traces.
 287   FrequentGarbageTraces* _frequent_garbage_traces;
 288 
 289   // Heap Sampling statistics.
 290   jvmtiHeapSamplingStats _stats;
 291 
 292   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 293   int _max_gc_storage;
 294 
 295   static StackTraceStorage* internal_storage;
 296   int _initialized;
 297 
 298   // Support functions and classes for copying data to the external
 299   // world.
 300   class StackTraceDataCopier {
 301    public:
 302     virtual int size() const = 0;
 303     virtual const StackTraceData* get(uint32_t i) const = 0;
 304   };
 305 
 306   class LiveStackTraceDataCopier : public StackTraceDataCopier {
 307    public:
 308     LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
 309         _data(data) {}
 310     int size() const { return _data ? _data->length() : 0; }
 311     const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }
 312 
 313    private:
 314     GrowableArray<StackTraceDataWithOop>* _data;
 315   };
 316 
 317   class GarbageStackTraceDataCopier : public StackTraceDataCopier {
 318    public:
 319     GarbageStackTraceDataCopier(StackTraceData** data, int size) :
 320         _data(data), _size(size) {}
 321     int size() const { return _size; }
 322     const StackTraceData* get(uint32_t i) const { return _data[i]; }
 323 
 324    private:
 325     StackTraceData** _data;
 326     int _size;
 327   };
 328 
 329   // Copies from StackTraceData to jvmtiStackTrace.
 330   bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);
 331 
 332   // Creates a deep copy of the list of StackTraceData.
 333   void copy_stack_traces(const StackTraceDataCopier &copier,
 334                          jvmtiStackTraces* traces);
 335 
 336   void store_garbage_trace(const StackTraceDataWithOop &trace);
 337 
 338   void free_garbage();
 339   void free_storage();
 340   void reset();
 341 
 342   void allocate_storage(int max_gc_storage);
 343 };
 344 
 345 StackTraceStorage* StackTraceStorage::internal_storage;
 346 
 347 // Statics for Sampler
 348 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 349 int HeapMonitoring::_enabled;
 350 jint HeapMonitoring::_monitoring_rate;
 351 
 352 // Cheap random number generator
 353 uint64_t HeapMonitoring::_rnd;
 354 
 355 StackTraceStorage::StackTraceStorage() {
 356   reset();
 357 }
 358 
 359 void StackTraceStorage::reset() {
 360   _allocated_traces = NULL;
 361   _traces_on_last_full_gc = NULL;
 362   _recent_garbage_traces = NULL;
 363   _frequent_garbage_traces = NULL;
 364   _max_gc_storage = 0;
 365   OrderAccess::release_store(&_initialized, 0);
 366 }
 367 
 368 void StackTraceStorage::free_garbage() {
 369   StackTraceData** recent_garbage = NULL;
 370   uint32_t recent_size = 0;
 371 
 372   StackTraceData** frequent_garbage = NULL;
 373   uint32_t frequent_size = 0;
 374 
 375   if (_recent_garbage_traces != NULL) {
 376     recent_garbage = _recent_garbage_traces->get_traces();
 377     recent_size = _recent_garbage_traces->size();
 378   }
 379 
 380   if (_frequent_garbage_traces != NULL) {
 381     frequent_garbage = _frequent_garbage_traces->get_traces();
 382     frequent_size = _frequent_garbage_traces->size();
 383   }
 384 
  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
 387   for (uint32_t i = 0; i < recent_size; i++) {
 388     StackTraceData::unreference_and_free(recent_garbage[i]);
 389   }
 390 
  // Then go through the frequent traces and free those that are now only referenced there.
 392   for (uint32_t i = 0; i < frequent_size; i++) {
 393     StackTraceData::unreference_and_free(frequent_garbage[i]);
 394   }
 395 }
 396 
 397 void StackTraceStorage::free_storage() {
 398   if (!initialized()) {
 399     return;
 400   }
 401 
 402   delete _allocated_traces;
 403   delete _traces_on_last_full_gc;
 404 
 405   free_garbage();
 406   delete _recent_garbage_traces;
 407   delete _frequent_garbage_traces;
 408 
 409   reset();
 410 }
 411 
 412 StackTraceStorage::~StackTraceStorage() {
 413   MutexLocker mu(HeapMonitorStorage_lock);
 414   free_storage();
 415 }
 416 
 417 void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads contended on the lock and then got through one by one.
 419   if (initialized()) {
 420     return;
 421   }
 422 
 423   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 424       GrowableArray<StackTraceDataWithOop>(128, true);
 425   _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
 426       GrowableArray<StackTraceDataWithOop>(128, true);
 427 
 428   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 429   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 430 
 431   _max_gc_storage = max_gc_storage;
 432   memset(&_stats, 0, sizeof(_stats));
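  // Release store so that readers using load_acquire in initialized() observe
  // fully constructed storage.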
 433   OrderAccess::release_store(&_initialized, 1);
 434 }
 435 
 436 void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
 437   MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute initialization check, in case a stop() freed the storage
  // between object_alloc_do_sample's initialization check and now.
 441   if (initialized()) {
 442     StackTraceDataWithOop new_data(trace, o);
 443     _stats.sample_count++;
 444     _stats.stack_depth_accumulation += trace->frame_count;
 445     _allocated_traces->append(new_data);
 446   }
 447 }
 448 
 449 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
 450                                      OopClosure* f) {
 451   size_t count = 0;
 452   if (initialized()) {
 453     int len = _allocated_traces->length();
 454 
 455     _traces_on_last_full_gc->clear();
 456 
 457     // Compact the oop traces.  Moves the live oops to the beginning of the
 458     // growable array, potentially overwriting the dead ones.
 459     for (int i = 0; i < len; i++) {
 460       StackTraceDataWithOop &trace = _allocated_traces->at(i);
 461       oop value = trace.load_oop();
 462       if (is_alive->do_object_b(value)) {
 463         // Update the oop to point to the new object if it is still alive.
 464         f->do_oop(trace.get_oop_addr());
 465 
 466         // Copy the old trace, if it is still live.
 467         _allocated_traces->at_put(count++, trace);
 468 
 469         // Store the live trace in a cache, to be served up on /heapz.
 470         _traces_on_last_full_gc->append(trace);
 471       } else {
 472         trace.clear_oop();
 473 
 474         // If the old trace is no longer live, add it to the list of
 475         // recently collected garbage.
 476         store_garbage_trace(trace);
 477       }
 478     }
 479 
 480     // Zero out remaining array elements.  Even though the call to trunc_to
 481     // below truncates these values, zeroing them out is good practice.
 482     StackTraceDataWithOop zero_trace;
 483     for (int i = count; i < len; i++) {
 484       _allocated_traces->at_put(i, zero_trace);
 485     }
 486 
 487     // Set the array's length to the number of live elements.
 488     _allocated_traces->trunc_to(count);
 489   }
 490 
  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
 492 }
 493 
 494 bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
 495                                   const StackTraceData* from) {
 496   const jvmtiStackTrace* src = from->get_trace();
 497   *to = *src;
 498 
 499   to->frames =
 500       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
 501 
 502   if (to->frames == NULL) {
 503     return false;
 504   }
 505 
 506   memcpy(to->frames,
 507          src->frames,
 508          sizeof(jvmtiFrameInfo) * src->frame_count);
 509   return true;
 510 }
 511 
 512 // Called by the outside world; returns a copy of the stack traces
 513 // (because we could be replacing them as the user handles them).
// The returned array is implicitly NULL-terminated (to make it easier to reclaim).
 515 void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {
 516   MutexLocker mu(HeapMonitorStorage_lock);
 517   if (!_allocated_traces) {
 518     traces->stack_traces = NULL;
 519     traces->trace_count = 0;
 520     return;
 521   }
 522 
 523   LiveStackTraceDataCopier copier(_allocated_traces);
 524   copy_stack_traces(copier, traces);
 525 }
 526 
 527 // See comment on get_all_stack_traces
 528 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {
 529   MutexLocker mu(HeapMonitorStorage_lock);
 530   if (!_recent_garbage_traces) {
 531     traces->stack_traces = NULL;
 532     traces->trace_count = 0;
 533     return;
 534   }
 535 
 536   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
 537                                      _recent_garbage_traces->size());
 538   copy_stack_traces(copier, traces);
 539 }
 540 
 541 // See comment on get_all_stack_traces
 542 void StackTraceStorage::get_frequent_garbage_stack_traces(
 543     jvmtiStackTraces* traces) {
 544   MutexLocker mu(HeapMonitorStorage_lock);
 545   if (!_frequent_garbage_traces) {
 546     traces->stack_traces = NULL;
 547     traces->trace_count = 0;
 548     return;
 549   }
 550 
 551   GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
 552                                      _frequent_garbage_traces->size());
 553   copy_stack_traces(copier, traces);
 554 }
 555 
 556 // See comment on get_all_stack_traces
 557 void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {
 558   MutexLocker mu(HeapMonitorStorage_lock);
 559   if (!_traces_on_last_full_gc) {
 560     traces->stack_traces = NULL;
 561     traces->trace_count = 0;
 562     return;
 563   }
 564 
 565   LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
 566   copy_stack_traces(copier, traces);
 567 }
 568 
 569 void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
 570                                           jvmtiStackTraces* traces) {
 571   int len = copier.size();
 572 
  // Create a new array to hold deep copies of the stack traces.
  // +1 for a zeroed terminator entry at the end.
 575   jvmtiStackTrace* t =
 576       NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
 577   if (t == NULL) {
 578     traces->stack_traces = NULL;
 579     traces->trace_count = 0;
 580     return;
 581   }
 582   // +1 to have a NULL at the end of the array.
 583   memset(t, 0, (len + 1) * sizeof(*t));
 584 
 585   // Copy the StackTraceData objects into the new array.
 586   int trace_count = 0;
 587   for (int i = 0; i < len; i++) {
 588     const StackTraceData* stack_trace = copier.get(i);
 589     if (stack_trace != NULL) {
 590       jvmtiStackTrace* to = &t[trace_count];
 591       if (!deep_copy(to, stack_trace)) {
 592         continue;
 593       }
 594       trace_count++;
 595     }
 596   }
 597 
 598   traces->stack_traces = t;
 599   traces->trace_count = trace_count;
 600 }
 601 
 602 void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
 603   StackTraceData* new_trace = new StackTraceData(trace.get_trace());
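  // The new trace starts with zero references; each buffer that accepts it
  // increments the count, and unreference_and_free deletes it once the count
  // drops back to zero.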
 604 
 605   bool accepted = _recent_garbage_traces->store_trace(new_trace);
 606 
  // 'accepted' is on the right of the || so that store_trace is always
  // evaluated, even if the recent buffer already accepted the trace.
 608   accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;
 609 
 610   if (!accepted) {
 611     // No one wanted to use it.
 612     delete new_trace;
 613   }
 614 
 615   _stats.garbage_collected_samples++;
 616 }
 617 
 618 void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
 619   StackTraceStorage::storage()->get_all_stack_traces(traces);
 620 }
 621 
 622 void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
 623   const jvmtiHeapSamplingStats& internal_stats =
 624       StackTraceStorage::storage()->get_heap_sampling_stats();
 625   *stats = internal_stats;
 626 }
 627 
 628 void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
 629   StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
 630 }
 631 
 632 void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
 633   StackTraceStorage::storage()->get_garbage_stack_traces(traces);
 634 }
 635 
 636 void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
 637   StackTraceStorage::storage()->get_cached_stack_traces(traces);
 638 }
 639 
 640 void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
 641   jint trace_count = traces->trace_count;
 642   jvmtiStackTrace* stack_traces = traces->stack_traces;
 643 
 644   for (jint i = 0; i < trace_count; i++) {
 645     jvmtiStackTrace* current_trace = stack_traces + i;
 646     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
 647   }
 648 
 649   FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
 650   traces->trace_count = 0;
 651   traces->stack_traces = NULL;
 652 }
 653 
 654 // Invoked by the GC to clean up old stack traces and remove old arrays
 655 // of instrumentation that are still lying around.
 656 void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 657   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 658   StackTraceStorage::storage()->weak_oops_do(is_alive, f);
 659 }
 660 
 661 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
 662                                           jint max_gc_storage) {
 663   MutexLocker mu(HeapMonitor_lock);
 664   // Ignore if already enabled.
 665   if (enabled()) {
 666     return;
 667   }
 668 
 669   _monitoring_rate = monitoring_rate;
 670 
 671   // Populate the lookup table for fast_log2.
 672   // This approximates the log2 curve with a step function.
 673   // Steps have height equal to log2 of the mid-point of the step.
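  // Entry i therefore holds log2(1 + (i + 0.5) / 2**FastLogNumBits).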
 674   for (int i = 0; i < (1 << FastLogNumBits); i++) {
 675     double half_way = static_cast<double>(i + 0.5);
 676     _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
 677   }
 678 
 679   JavaThread* t = static_cast<JavaThread*>(Thread::current());
 680   _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
 681   if (_rnd == 0) {
 682     _rnd = 1;
 683   }
 684 
 685   StackTraceStorage::storage()->initialize(max_gc_storage);
 686   OrderAccess::release_store(&_enabled, 1);
 687 }
 688 
 689 void HeapMonitoring::stop_profiling() {
 690   MutexLocker mu(HeapMonitor_lock);
 691 
 692   if (enabled()) {
 693     StackTraceStorage::storage()->stop();
 694     OrderAccess::release_store(&_enabled, 0);
 695   }
 696 }
 697 
 698 // Generates a geometric variable with the specified mean (512K by default).
 699 // This is done by generating a random number between 0 and 1 and applying
 700 // the inverse cumulative distribution function for an exponential.
 701 // Specifically: Let m be the inverse of the sample rate, then
 702 // the probability distribution function is m*exp(-mx) so the CDF is
 703 // p = 1 - exp(-mx), so
 704 // q = 1 - p = exp(-mx)
 705 // log_e(q) = -mx
 706 // -log_e(q)/m = x
 707 // log_2(q) * (-log_e(2) * 1/m) = x
 708 // In the code, q is actually in the range 1 to 2**26, hence the -26 below
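// For example, when q is 2**25 (i.e. the uniform draw is 0.5), the resulting
// step is about ln(2) * _monitoring_rate, roughly 363K bytes at the default
// 512K sampling rate.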
 709 void HeapMonitoring::pick_next_sample(size_t* ptr) {
 710   _rnd = next_random(_rnd);
 711   // Take the top 26 bits as the random number
  // (This, plus the 1<<58 sampling bound, gives a maximum possible step of
  // 5194297183973780480 bytes.  In this case, for sample_parameter = 1<<19,
  // the maximum possible step is 9448372 bytes (24 bits).)
 716   const uint64_t PrngModPower = 48;  // Number of bits in prng
 717   // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
 718   // under piii debug for some binaries.
 719   double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
 720   // Put the computed p-value through the CDF of a geometric.
 721   // For faster performance (save ~1/20th exec time), replace
 722   // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
 723   // The value 26.000705 is used rather than 26 to compensate
 724   // for inaccuracies in FastLog2 which otherwise result in a
 725   // negative answer.
 726   double log_val = (fast_log2(q) - 26);
 727   size_t rate = static_cast<size_t>(
 728       (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
 729   *ptr = rate;
 730 
 731   StackTraceStorage::storage()->accumulate_sample_rate(rate);
 732 }
 733 
void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread* thread = static_cast<JavaThread*>(t);
 739 
 740     jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
 741     if (trace == NULL) {
 742       return;
 743     }
 744 
 745     jvmtiFrameInfo* frames =
 746         NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
 747 
 748     if (frames == NULL) {
 749       FREE_C_HEAP_OBJ(trace);
 750       return;
 751     }
 752 
 753     trace->frames = frames;
 754     trace->thread_id = SharedRuntime::get_java_tid(thread);
 755     trace->size = byte_size;
 756     trace->frame_count = 0;
 757 
 758     if (thread->has_last_Java_frame()) { // just to be safe
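      // Walk the Java frames, recording each frame's bci and jmethodID, up to
      // MaxStackDepth frames.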
 759       vframeStream vfst(thread, true);
 760       int count = 0;
 761       while (!vfst.at_end() && count < MaxStackDepth) {
 762         Method* m = vfst.method();
 763         frames[count].location = vfst.bci();
 764         frames[count].method = m->jmethod_id();
 765         count++;
 766 
 767         vfst.next();
 768       }
 769       trace->frame_count = count;
 770     }
 771 
    if (trace->frame_count > 0) {
 773       // Success!
 774       StackTraceStorage::storage()->add_trace(trace, o);
 775       return;
 776     }
 777 
 778     // Failure!
 779     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
 780     FREE_C_HEAP_OBJ(trace);
 781   }
 782 }