/*
 * Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing a trace, used once the object has been GC'd.
class StackTraceData : public CHeapObj<mtInternal> {
 private:
  jvmtiAllocTraceInfo* _trace;
  int _references;

 public:
  StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}

  void increment_reference_count() {
    _references++;
  }

  jvmtiAllocTraceInfo* get_trace() const {
    return _trace;
  }

  static void unreference_and_free(StackTraceData* data) {
    if (!data) {
      return;
    }

    data->_references--;
    if (data->_references == 0) {
      if (data->_trace != NULL) {
        jvmtiStackInfo* stack_info = data->_trace->stack_info;
        FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
        FREE_C_HEAP_OBJ(stack_info);
        FREE_C_HEAP_OBJ(data->_trace);
      }
      delete data;
    }
  }
};

// Internal data structure representing traces with the oop, used while object
// is live. Since this structure just passes the trace to the GC lists, it does
// not handle any freeing.
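// The oop is stored with phantom reference strength and loaded with
// AS_NO_KEEPALIVE, so holding a sample never extends the lifetime of the
// sampled object; StackTraceStorage::weak_oops_do() decides liveness and
// either updates or clears the oop.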
class StackTraceDataWithOop : public StackTraceData {
 private:
  oop _obj;

 public:
  StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
    store_oop(o);
  }

  StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  }

  oop load_oop() {
    return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_obj);
  }

  oop* get_oop_addr() {
    return &_obj;
  }

  void store_oop(oop value) {
    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  }

  void clear_oop() {
    store_oop(reinterpret_cast<oop>(NULL));
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData* trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData* old_data = _garbage_traces[index];
    StackTraceData::unreference_and_free(old_data);

    trace->increment_reference_count();
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t* index) = 0;

  const uint32_t _size;

 private:
  // The stored garbage traces: a fixed-size array whose replacement policy
  // is chosen by the subclass.
  StackTraceData** _garbage_traces;
};

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, a newly stored trace replaces a uniformly random
// entry with probability size/samples_seen (reservoir sampling). This
// strategy tends towards preserving the most frequently occurring traces
// over time.
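// For example, with size == 100 and 10,000 samples seen so far, a newly
// collected trace is stored with probability 100/10000 = 1% and overwrites a
// uniformly random slot, so the buffer approximates a uniform sample of all
// garbage traces seen so far.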
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint64_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = static_cast<uint32_t>(random_index);
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace together with the id of
// the thread that allocated it.
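// All public entry points take HeapMonitorStorage_lock, except
// weak_oops_do(), which relies on being called at a safepoint by the VM
// thread.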
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  // Returns whether the trace was added.
  bool add_trace(jvmtiAllocTraceInfo* trace, oop o);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_live_alloc_stack_traces(JvmtiEnv* env,
                                   jvmtiAllocTraceInfo** traces,
                                   jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_garbage_alloc_stack_traces(JvmtiEnv* env,
                                      jvmtiAllocTraceInfo** traces,
                                      jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_frequent_garbage_alloc_stack_traces(JvmtiEnv* env,
                                               jvmtiAllocTraceInfo** traces,
                                               jint* trace_counter_ptr);

  // The function that gets called by the client to retrieve the list
  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_cached_live_alloc_stack_traces(JvmtiEnv* env,
                                          jvmtiAllocTraceInfo** traces,
                                          jint* trace_counter_ptr);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    MutexLocker mu(HeapMonitorStorage_lock);
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    MutexLocker mu(HeapMonitorStorage_lock);
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceDataWithOop>* _allocated_traces;

  // The live traces cached at the last full GC.
  GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces* _recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces* _frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  bool _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData* get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceDataWithOop>* _data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData** data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData* get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData** _data;
    int _size;
  };

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(JvmtiEnv* env,
                         const StackTraceDataCopier &copier,
                         jvmtiAllocTraceInfo** traces,
                         jint* trace_counter_ptr);

  void store_garbage_trace(const StackTraceDataWithOop &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);

  int calculate_frame_count(const StackTraceDataCopier &copier);
  int calculate_info_count(const StackTraceDataCopier &copier);

  bool copy_frame(const StackTraceData* stack_trace_data,
                  jvmtiAllocTraceInfo* current_alloc_traces,
                  jvmtiStackInfo* current_stack_info,
                  jvmtiFrameInfo* current_frame_info);

  // Returns frame copy success. Failure can result when there is no longer
  // enough memory.
  bool copy_frames(const StackTraceDataCopier& copier, int info_count,
                   unsigned char* start,
                   unsigned char* end);
};

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
int HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  MutexLocker mu(HeapMonitorStorage_lock);
  reset();
}

void StackTraceStorage::reset() {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  _initialized = false;
}

void StackTraceStorage::free_garbage() {
  StackTraceData** recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData** frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData::unreference_and_free(recent_garbage[i]);
  }

  // Then go through the frequent traces and free those whose last reference was here.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData::unreference_and_free(frequent_garbage[i]);
  }
}

void StackTraceStorage::free_storage() {
  if (!_initialized) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  // In case multiple threads blocked on the lock and then got through one by one.
  if (_initialized) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  memset(&_stats, 0, sizeof(_stats));
  _initialized = true;
}

bool StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last minute check on initialization here in case the system got turned off
  // and a few sample requests got through to here, i.e.:
  //    A sample point happened in the TLAB; that code checks
  //    HeapMonitoring::enabled and calls object_alloc_do_sample.
  //    That code starts collecting a stacktrace and then calls add_trace.
  //
  //    At the same time, another thread has turned off HeapMonitoring while
  //    the stacktraces were getting constructed and disables StackTraceStorage.
  //
  //    Both disabling and this add_trace are protected by the same
  //    HeapMonitorStorage_lock mutex.
  if (!_initialized) {
    return false;
  }

  StackTraceDataWithOop new_data(trace, o);
  _stats.sample_count++;
  _stats.stack_depth_accumulation += trace->stack_info->frame_count;
  _allocated_traces->append(new_data);
  return true;
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                     OopClosure* f) {
  size_t count = 0;
  if (_initialized) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    for (int i = 0; i < len; i++) {
      StackTraceDataWithOop &trace = _allocated_traces->at(i);
      oop value = trace.load_oop();
      if (is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(trace.get_oop_addr());

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(count++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);
      } else {
        trace.clear_oop();

        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceDataWithOop zero_trace;
    for (int i = count; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(count);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The copy is a single contiguous JVMTI allocation that the caller deallocates.
void StackTraceStorage::get_live_alloc_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_allocated_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_live_alloc_stack_traces.
void StackTraceStorage::get_garbage_alloc_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_recent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_live_alloc_stack_traces.
void StackTraceStorage::get_frequent_garbage_alloc_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_frequent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_live_alloc_stack_traces.
void StackTraceStorage::get_cached_live_alloc_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_traces_on_last_full_gc) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  // Walk the traces to compute the total number of frames.
  int frame_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
      jvmtiStackInfo* stack_info = trace->stack_info;
      frame_total += stack_info->frame_count;
    }
  }

  return frame_total;
}

int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  int info_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      // TODO: merge this with the method above.
      info_total++;
    }
  }

  return info_total;
}

// Tests whether one more element of type T, written at src, would still fit
// before the end address.
template<typename T, typename U>
static bool next_ptr_less_or_equal(T src, U* end) {
  return (src + 1) <= reinterpret_cast<T>(end);
}

bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
                                   jvmtiAllocTraceInfo* current_alloc_trace,
                                   jvmtiStackInfo* current_stack_info,
                                   jvmtiFrameInfo* current_frame_info) {
  jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
  jvmtiStackInfo* stack_info = trace->stack_info;
  int frame_count = stack_info->frame_count;

  memcpy(current_alloc_trace, trace, sizeof(*trace));

  current_alloc_trace->stack_info = current_stack_info;
  memcpy(current_stack_info, stack_info, sizeof(*stack_info));

  current_stack_info->frame_buffer = current_frame_info;
  memcpy(current_frame_info, stack_info->frame_buffer,
         sizeof(jvmtiFrameInfo) * frame_count);
  return true;
}

bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
                                    int info_count,
                                    unsigned char* start,
                                    unsigned char* end) {
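  // The output buffer is laid out as three consecutive arrays: info_count
  // jvmtiAllocTraceInfo records, then info_count jvmtiStackInfo records,
  // then all the jvmtiFrameInfo records. copy_frame() points each trace's
  // stack_info and frame_buffer into the later regions as the copy proceeds.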
  jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast<jvmtiAllocTraceInfo*>(start);
  jvmtiStackInfo* start_stack_info = reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
  jvmtiFrameInfo* start_frame_info = reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);

  jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
  jvmtiStackInfo* current_stack_info = start_stack_info;
  jvmtiFrameInfo* current_frame_info = start_frame_info;

  for (int i = 0; i < info_count; i++) {
    assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
           "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
    assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
           "jvmtiStackInfo would write over jvmtiFrameInfos.");

    assert(next_ptr_less_or_equal(current_frame_info, end),
           "jvmtiFrameInfo would write over the end of the buffer.");

    const StackTraceData* stack_trace_data = copier.get(i);
    if (stack_trace_data != NULL) {
      if (!copy_frame(stack_trace_data, current_alloc_trace,
                      current_stack_info, current_frame_info)) {
        return false;
      }

      current_frame_info += current_stack_info->frame_count;
      current_stack_info++;
      current_alloc_trace++;
    }
  }

  return true;
}

void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
                                          const StackTraceDataCopier& copier,
                                          jvmtiAllocTraceInfo** traces,
                                          jint* trace_counter_ptr) {
  *traces = NULL;
  *trace_counter_ptr = 0;

  int frame_total = calculate_frame_count(copier);
  int len = calculate_info_count(copier);

  // Allocate all the stack traces in one block to simplify freeing.
  size_t total_size = len * sizeof(jvmtiAllocTraceInfo)
      + len * sizeof(jvmtiStackInfo)
      + frame_total * sizeof(jvmtiFrameInfo);

  unsigned char* buffer = NULL;
  jvmtiAllocTraceInfo* result = NULL;
  JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
  env_base->allocate(total_size, &buffer);

  if (buffer == NULL) {
    return;
  }

  bool success = copy_frames(copier, len, buffer, buffer + total_size);

  if (!success) {
    env_base->deallocate(buffer);
    return;
  }

  *trace_counter_ptr = len;
  *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
}

void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
  StackTraceData* new_trace = new StackTraceData(trace.get_trace());

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is on the right of the || so that store_trace() is always
  // evaluated, even when the recent buffer already accepted the trace.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_alloc_traces(JvmtiEnv* env,
                                           jvmtiAllocTraceInfo** traces,
                                           jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_live_alloc_stack_traces(
      env, traces, trace_counter_ptr);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_alloc_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_frequent_garbage_alloc_stack_traces(
      env, traces, trace_counter_ptr);
}

void HeapMonitoring::get_garbage_alloc_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_garbage_alloc_stack_traces(
      env, traces, trace_counter_ptr);
}

void HeapMonitoring::get_cached_live_alloc_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_cached_live_alloc_stack_traces(
      env, traces, trace_counter_ptr);
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);

  // Ignore if already enabled.
  if (enabled()) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }

  JavaThread* t = static_cast<JavaThread*>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  OrderAccess::release_store(&_enabled, 1);
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);

  if (enabled()) {
    StackTraceStorage::storage()->stop();
    OrderAccess::release_store(&_enabled, 0);
  }
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
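// Concretely, the code below computes
//   rate = -log_e(q / 2**26) * _monitoring_rate + 1
// so the returned rate is (approximately) exponentially distributed with
// mean _monitoring_rate.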
void HeapMonitoring::pick_next_sample(size_t* ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (save ~1/20th exec time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705)
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
  assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
  JavaThread* thread = static_cast<JavaThread*>(t);

  jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
  if (trace == NULL) {
    return;
  }

  jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
  if (stack_info == NULL) {
    FREE_C_HEAP_OBJ(trace);
    return;
  }
  trace->stack_info = stack_info;

  jvmtiFrameInfo* frames =
      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

  if (frames == NULL) {
    FREE_C_HEAP_OBJ(stack_info);
    FREE_C_HEAP_OBJ(trace);
    return;
  }
  stack_info->frame_buffer = frames;
  stack_info->frame_count = 0;

  trace->thread_id = SharedRuntime::get_java_tid(thread);
  trace->size = byte_size;

  if (thread->has_last_Java_frame()) { // just to be safe
    vframeStream vfst(thread, true);
    int count = 0;
    while (!vfst.at_end() && count < MaxStackDepth) {
      Method* m = vfst.method();
      frames[count].location = vfst.bci();
      frames[count].method = m->jmethod_id();
      count++;

      vfst.next();
    }
    stack_info->frame_count = count;
  }

  if (stack_info->frame_count > 0) {
    if (StackTraceStorage::storage()->add_trace(trace, o)) {
      return;
    }
  }

  // Failure!
  FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
  FREE_C_HEAP_OBJ(stack_info);
  FREE_C_HEAP_OBJ(trace);
}