/*
 * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vframe.hpp"

static const int MaxStackDepth = 1024;

// Internal data structure representing traces, used once the object has been GC'd.
class StackTraceData : public CHeapObj<mtInternal> {
 private:
  jvmtiAllocTraceInfo* _trace;
  int _references;

 public:
  StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}

  void increment_reference_count() {
    _references++;
  }

  jvmtiAllocTraceInfo* get_trace() const {
    return _trace;
  }

  static void unreference_and_free(StackTraceData* data) {
    if (!data) {
      return;
    }

    data->_references--;
    if (data->_references == 0) {
      if (data->_trace != NULL) {
        jvmtiStackInfo* stack_info = data->_trace->stack_info;
        FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
        FREE_C_HEAP_OBJ(stack_info);
        FREE_C_HEAP_OBJ(data->_trace);
      }
      delete data;
    }
  }
};
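
// Note on lifetime (an illustrative sketch, not part of the original
// comments): a single StackTraceData can be held by both garbage buffers, so
// it is freed only when the last holder releases it:
//
//   StackTraceData* data = new StackTraceData(trace);
//   data->increment_reference_count();            // held by the recent buffer
//   data->increment_reference_count();            // held by the frequent buffer
//   StackTraceData::unreference_and_free(data);   // still referenced: kept
//   StackTraceData::unreference_and_free(data);   // last reference: freed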

// Internal data structure representing traces along with the oop, used while
// the object is live. Since this structure just passes the trace to the GC
// lists, it does not handle any freeing.
class StackTraceDataWithOop : public StackTraceData {
 private:
  oop _obj;

 public:
  StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
    store_oop(o);
  }

  StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  }

  oop load_oop() {
    return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_obj);
  }

  oop* get_oop_addr() {
    return &_obj;
  }

  void store_oop(oop value) {
    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  }

  void clear_oop() {
    store_oop(reinterpret_cast<oop>(NULL));
  }
};

// Fixed size buffer for holding garbage traces.
class GarbageTracesBuffer : public CHeapObj<mtInternal> {
 public:
  GarbageTracesBuffer(uint32_t size) : _size(size) {
    _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
                                       size,
                                       mtInternal);
    memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  }

  virtual ~GarbageTracesBuffer() {
    FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  }

  StackTraceData** get_traces() const {
    return _garbage_traces;
  }

  bool store_trace(StackTraceData* trace) {
    uint32_t index;
    if (!select_replacement(&index)) {
      return false;
    }

    StackTraceData* old_data = _garbage_traces[index];
    StackTraceData::unreference_and_free(old_data);

    trace->increment_reference_count();
    _garbage_traces[index] = trace;
    return true;
  }

  uint32_t size() const {
    return _size;
  }

 protected:
  // Subclasses select the trace to replace. Returns false if no replacement
  // is to happen, otherwise stores the index of the trace to replace in
  // *index.
  virtual bool select_replacement(uint32_t* index) = 0;

  const uint32_t _size;

 private:
  // The current garbage traces: a fixed-size buffer whose replacement policy
  // is chosen by the subclass.
  StackTraceData** _garbage_traces;
};
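
// A subclass only has to provide the replacement policy. A minimal sketch of
// the contract (hypothetical subclass, for illustration only):
//
//   class KeepFirstGarbageTraces : public GarbageTracesBuffer {
//    public:
//     KeepFirstGarbageTraces(uint32_t size)
//         : GarbageTracesBuffer(size), _pos(0) {}
//    protected:
//     // Accept traces until the buffer is full, then reject everything.
//     virtual bool select_replacement(uint32_t* index) {
//       if (_pos >= _size) {
//         return false;
//       }
//       *index = _pos++;
//       return true;
//     }
//    private:
//     uint32_t _pos;
//   };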

// Keep a statistical sample of traces over the lifetime of the server.
// When the buffer is full, a new trace replaces a uniformly random entry with
// probability _size / _samples_seen (reservoir sampling). This strategy tends
// towards preserving the most frequently occurring traces over time.
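// For example, with a buffer of 100 entries, the 1000th sample seen is kept
// with probability 100/1000, evicting a uniformly random slot, so every
// sample seen so far is equally likely to be in the buffer.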
class FrequentGarbageTraces : public GarbageTracesBuffer {
 public:
  FrequentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0),
        _samples_seen(0) {
  }

  virtual ~FrequentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    ++_samples_seen;

    if (_garbage_traces_pos < _size) {
      *index = _garbage_traces_pos++;
      return true;
    }

    uint64_t random_uint64 =
        (static_cast<uint64_t>(::random()) << 32) | ::random();

    uint32_t random_index = random_uint64 % _samples_seen;
    if (random_index < _size) {
      *index = random_index;
      return true;
    }

    return false;
  }

 private:
  // The current position in the buffer as we initially fill it.
  uint32_t _garbage_traces_pos;

  uint64_t _samples_seen;
};

// Store most recent garbage traces.
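// The buffer is used as a ring: with a size of 4, successive stores land in
// slots 0, 1, 2, 3, 0, 1, ..., so the buffer always holds the most recently
// collected traces.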
class MostRecentGarbageTraces : public GarbageTracesBuffer {
 public:
  MostRecentGarbageTraces(int size)
      : GarbageTracesBuffer(size),
        _garbage_traces_pos(0) {
  }

  virtual ~MostRecentGarbageTraces() {
  }

  virtual bool select_replacement(uint32_t* index) {
    *index = _garbage_traces_pos;

    _garbage_traces_pos =
        (_garbage_traces_pos + 1) % _size;

    return true;
  }

 private:
  // The current position in the buffer.
  uint32_t _garbage_traces_pos;
};

// Each object that we profile is stored as a trace together with its thread id.
class StackTraceStorage : public CHeapObj<mtInternal> {
 public:
  // The function that gets called to add a trace to the list of
  // traces we are maintaining.
  void add_trace(jvmtiAllocTraceInfo* trace, oop o);

  // Called by the client to retrieve the list of live stack traces.
  // Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_all_stack_traces(JvmtiEnv* env,
                            jvmtiAllocTraceInfo** traces,
                            jint* trace_counter_ptr);

  // Called by the client to retrieve the list of recent garbage stack traces.
  // Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_garbage_stack_traces(JvmtiEnv* env,
                                jvmtiAllocTraceInfo** traces,
                                jint* trace_counter_ptr);

  // Called by the client to retrieve the list of frequent garbage stack
  // traces. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_frequent_garbage_stack_traces(JvmtiEnv* env,
                                         jvmtiAllocTraceInfo** traces,
                                         jint* trace_counter_ptr);

  // Called by the client to retrieve the list of stack traces cached at the
  // last full GC. Passes a jvmtiAllocTraceInfo which will get mutated.
  void get_cached_stack_traces(JvmtiEnv* env,
                               jvmtiAllocTraceInfo** traces,
                               jint* trace_counter_ptr);

  // Executes whenever weak references are traversed.  is_alive tells
  // you if the given oop is still reachable and live.
  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  ~StackTraceStorage();
  StackTraceStorage();

  static StackTraceStorage* storage() {
    static StackTraceStorage internal_storage;
    return &internal_storage;
  }

  void initialize(int max_storage) {
    MutexLocker mu(HeapMonitorStorage_lock);
    allocate_storage(max_storage);
  }

  void stop() {
    MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
    MutexLocker mu(HeapMonitorStorage_lock);
    return _stats;
  }

  void accumulate_sample_rate(size_t rate) {
    MutexLocker mu(HeapMonitorStorage_lock);
    _stats.sample_rate_accumulation += rate;
    _stats.sample_rate_count++;
  }

  bool initialized() {
    return OrderAccess::load_acquire(&_initialized) != 0;
  }

 private:
  // The traces currently sampled.
  GrowableArray<StackTraceDataWithOop>* _allocated_traces;

  // The traces cached at the last full GC.
  GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;

  // Recent garbage traces.
  MostRecentGarbageTraces* _recent_garbage_traces;

  // Frequent garbage traces.
  FrequentGarbageTraces* _frequent_garbage_traces;

  // Heap Sampling statistics.
  jvmtiHeapSamplingStats _stats;

  // Maximum amount of storage provided by the JVMTI call initialize_profiling.
  int _max_gc_storage;

  static StackTraceStorage* internal_storage;
  int _initialized;

  // Support functions and classes for copying data to the external
  // world.
  class StackTraceDataCopier {
   public:
    virtual int size() const = 0;
    virtual const StackTraceData* get(uint32_t i) const = 0;
  };

  class LiveStackTraceDataCopier : public StackTraceDataCopier {
   public:
    LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
        _data(data) {}
    int size() const { return _data ? _data->length() : 0; }
    const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }

   private:
    GrowableArray<StackTraceDataWithOop>* _data;
  };

  class GarbageStackTraceDataCopier : public StackTraceDataCopier {
   public:
    GarbageStackTraceDataCopier(StackTraceData** data, int size) :
        _data(data), _size(size) {}
    int size() const { return _size; }
    const StackTraceData* get(uint32_t i) const { return _data[i]; }

   private:
    StackTraceData** _data;
    int _size;
  };

  // Creates a deep copy of the list of StackTraceData.
  void copy_stack_traces(JvmtiEnv* env,
                         const StackTraceDataCopier &copier,
                         jvmtiAllocTraceInfo** traces,
                         jint* trace_counter_ptr);

  void store_garbage_trace(const StackTraceDataWithOop &trace);

  void free_garbage();
  void free_storage();
  void reset();

  void allocate_storage(int max_gc_storage);

  int calculate_frame_count(const StackTraceDataCopier &copier);
  int calculate_info_count(const StackTraceDataCopier &copier);

  bool copy_frame(const StackTraceData* stack_trace_data,
                  jvmtiAllocTraceInfo* current_alloc_traces,
                  jvmtiStackInfo* current_stack_info,
                  jvmtiFrameInfo* current_frame_info);

  // Returns whether the frame copy succeeded. Copying can fail when there is
  // not enough memory left in the buffer.
  bool copy_frames(const StackTraceDataCopier& copier, int info_count,
                   unsigned char* start,
                   unsigned char* end);
};
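
// Typical lifecycle of the storage singleton (an illustrative sketch; the
// real callers are the HeapMonitoring entry points below):
//
//   StackTraceStorage* s = StackTraceStorage::storage();
//   s->initialize(max_gc_storage);                  // allocate the buffers
//   s->add_trace(trace, obj);                       // per sampled allocation
//   s->weak_oops_do(is_alive, f);                   // at GC safepoints
//   s->get_all_stack_traces(env, &traces, &count);  // JVMTI client query
//   s->stop();                                      // free everything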

StackTraceStorage* StackTraceStorage::internal_storage;

// Statics for Sampler
double HeapMonitoring::_log_table[1 << FastLogNumBits];
int HeapMonitoring::_enabled;
jint HeapMonitoring::_monitoring_rate;

// Cheap random number generator
uint64_t HeapMonitoring::_rnd;

StackTraceStorage::StackTraceStorage() {
  MutexLocker mu(HeapMonitorStorage_lock);
  reset();
}

void StackTraceStorage::reset() {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  _allocated_traces = NULL;
  _traces_on_last_full_gc = NULL;
  _recent_garbage_traces = NULL;
  _frequent_garbage_traces = NULL;
  _max_gc_storage = 0;
  OrderAccess::release_store(&_initialized, 0);
}

void StackTraceStorage::free_garbage() {
  StackTraceData** recent_garbage = NULL;
  uint32_t recent_size = 0;

  StackTraceData** frequent_garbage = NULL;
  uint32_t frequent_size = 0;

  if (_recent_garbage_traces != NULL) {
    recent_garbage = _recent_garbage_traces->get_traces();
    recent_size = _recent_garbage_traces->size();
  }

  if (_frequent_garbage_traces != NULL) {
    frequent_garbage = _frequent_garbage_traces->get_traces();
    frequent_size = _frequent_garbage_traces->size();
  }

  // Simple solution since this happens at exit.
  // Go through the recent traces and free any that are only referenced there.
  for (uint32_t i = 0; i < recent_size; i++) {
    StackTraceData::unreference_and_free(recent_garbage[i]);
  }

  // Then go through the frequent traces and free those that are now only
  // referenced there.
  for (uint32_t i = 0; i < frequent_size; i++) {
    StackTraceData::unreference_and_free(frequent_garbage[i]);
  }
}

void StackTraceStorage::free_storage() {
  if (!initialized()) {
    return;
  }

  delete _allocated_traces;
  delete _traces_on_last_full_gc;

  free_garbage();
  delete _recent_garbage_traces;
  delete _frequent_garbage_traces;

  reset();
}

StackTraceStorage::~StackTraceStorage() {
  MutexLocker mu(HeapMonitorStorage_lock);
  free_storage();
}

void StackTraceStorage::allocate_storage(int max_gc_storage) {
  assert(HeapMonitorStorage_lock->owned_by_self()
         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "This should not be accessed concurrently");

  // In case multiple threads blocked on the lock and then got through one by one.
  if (initialized()) {
    return;
  }

  _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);
  _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
      GrowableArray<StackTraceDataWithOop>(128, true);

  _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
  _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

  _max_gc_storage = max_gc_storage;
  memset(&_stats, 0, sizeof(_stats));
  OrderAccess::release_store(&_initialized, 1);
}

void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
  MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute initialization check, in case a stop() deleted the data
  // between object_alloc_do_sample's initialization check and now.
  if (initialized()) {
    StackTraceDataWithOop new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->stack_info->frame_count;
    _allocated_traces->append(new_data);
  }
}

void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                     OopClosure* f) {
  int count = 0;
  if (initialized()) {
    int len = _allocated_traces->length();

    _traces_on_last_full_gc->clear();

    // Compact the oop traces.  Moves the live oops to the beginning of the
    // growable array, potentially overwriting the dead ones.
    for (int i = 0; i < len; i++) {
      StackTraceDataWithOop &trace = _allocated_traces->at(i);
      oop value = trace.load_oop();
      if (is_alive->do_object_b(value)) {
        // Update the oop to point to the new object if it is still alive.
        f->do_oop(trace.get_oop_addr());

        // Copy the old trace, if it is still live.
        _allocated_traces->at_put(count++, trace);

        // Store the live trace in a cache, to be served up on /heapz.
        _traces_on_last_full_gc->append(trace);
      } else {
        trace.clear_oop();

        // If the old trace is no longer live, add it to the list of
        // recently collected garbage.
        store_garbage_trace(trace);
      }
    }

    // Zero out remaining array elements.  Even though the call to trunc_to
    // below truncates these values, zeroing them out is good practice.
    StackTraceDataWithOop zero_trace;
    for (int i = count; i < len; i++) {
      _allocated_traces->at_put(i, zero_trace);
    }

    // Set the array's length to the number of live elements.
    _allocated_traces->trunc_to(count);
  }

  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (%d)", count);
}

// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env,
                                             jvmtiAllocTraceInfo** traces,
                                             jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_allocated_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_allocated_traces);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env,
                                                 jvmtiAllocTraceInfo** traces,
                                                 jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_recent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                     _recent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_frequent_garbage_traces) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                     _frequent_garbage_traces->size());
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

// See comment on get_all_stack_traces
void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env,
                                                jvmtiAllocTraceInfo** traces,
                                                jint* trace_counter_ptr) {
  MutexLocker mu(HeapMonitorStorage_lock);
  if (!_traces_on_last_full_gc) {
    *traces = NULL;
    *trace_counter_ptr = 0;
    return;
  }

  LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
  copy_stack_traces(env, copier, traces, trace_counter_ptr);
}

int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  // Walk the traces first to find the size of the frames as well.
  int frame_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
      jvmtiStackInfo* stack_info = trace->stack_info;
      frame_total += stack_info->frame_count;
    }
  }

  return frame_total;
}

int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
  int len = copier.size();

  int info_total = 0;

  for (int i = 0; i < len; i++) {
    const StackTraceData* stack_trace = copier.get(i);

    if (stack_trace != NULL) {
      // TODO: merge this with the method above.
      info_total++;
    }
  }

  return info_total;
}

// Method to test if the data structure would fit between the src address and
// the end address.
template<typename T, typename U>
static bool next_ptr_less_or_equal(T src, U* end) {
  return (src + 1) <= reinterpret_cast<T>(end);
}

bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
                                   jvmtiAllocTraceInfo* current_alloc_trace,
                                   jvmtiStackInfo* current_stack_info,
                                   jvmtiFrameInfo* current_frame_info) {
  jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
  jvmtiStackInfo* stack_info = trace->stack_info;
  int frame_count = stack_info->frame_count;

  memcpy(current_alloc_trace, trace, sizeof(*trace));

  current_alloc_trace->stack_info = current_stack_info;
  memcpy(current_stack_info, stack_info, sizeof(*stack_info));

  current_stack_info->frame_buffer = current_frame_info;
  memcpy(current_frame_info, stack_info->frame_buffer,
         sizeof(jvmtiFrameInfo) * frame_count);
  return true;
}

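// The output buffer handed to copy_frames is laid out as three consecutive
// regions, sized by the calculate_* helpers above:
//
//   [jvmtiAllocTraceInfo x info_count][jvmtiStackInfo x info_count][jvmtiFrameInfo x frame_total]
//
// copy_frames fills the regions in lockstep, advancing one cursor per region;
// the asserts below check that no cursor overruns the start of the next
// region (or the end of the buffer).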
bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
                                    int info_count,
                                    unsigned char* start,
                                    unsigned char* end) {
  jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast<jvmtiAllocTraceInfo*>(start);
  jvmtiStackInfo* start_stack_info = reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
  jvmtiFrameInfo* start_frame_info = reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);

  jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
  jvmtiStackInfo* current_stack_info = start_stack_info;
  jvmtiFrameInfo* current_frame_info = start_frame_info;

  for (int i = 0; i < info_count; i++) {
    assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
           "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
    assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
           "jvmtiStackInfo would write over jvmtiFrameInfos.");

    assert(next_ptr_less_or_equal(current_frame_info, end),
           "jvmtiFrameInfo would write over the end of the buffer.");

    const StackTraceData* stack_trace_data = copier.get(i);
    if (stack_trace_data != NULL) {
      if (!copy_frame(stack_trace_data, current_alloc_trace,
                      current_stack_info, current_frame_info)) {
        return false;
      }

      current_frame_info += current_stack_info->frame_count;
      current_stack_info++;
      current_alloc_trace++;
    }
  }

  return true;
}

void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
                                          const StackTraceDataCopier& copier,
                                          jvmtiAllocTraceInfo** traces,
                                          jint* trace_counter_ptr) {
  *traces = NULL;
  *trace_counter_ptr = 0;

  int frame_total = calculate_frame_count(copier);
  int len = calculate_info_count(copier);

  // Allocate all the stack traces in one block to simplify freeing.
  size_t total_size = len * sizeof(jvmtiAllocTraceInfo)
      + len * sizeof(jvmtiStackInfo)
      + frame_total * sizeof(jvmtiFrameInfo);

  unsigned char* buffer = NULL;
  JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
  env_base->allocate(total_size, &buffer);

  if (buffer == NULL) {
    return;
  }

  bool success = copy_frames(copier, len, buffer, buffer + total_size);

  if (!success) {
    env_base->deallocate(buffer);
    return;
  }

  *trace_counter_ptr = len;
  *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
}

void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
  StackTraceData* new_trace = new StackTraceData(trace.get_trace());

  bool accepted = _recent_garbage_traces->store_trace(new_trace);

  // accepted is on the right-hand side of the || so that store_trace is
  // always evaluated.
  accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;

  if (!accepted) {
    // No one wanted to use it.
    delete new_trace;
  }

  _stats.garbage_collected_samples++;
}

void HeapMonitoring::get_live_traces(JvmtiEnv* env,
                                     jvmtiAllocTraceInfo** traces,
                                     jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_all_stack_traces(env,
                                                     traces,
                                                     trace_counter_ptr);
}

void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
  const jvmtiHeapSamplingStats& internal_stats =
      StackTraceStorage::storage()->get_heap_sampling_stats();
  *stats = internal_stats;
}

void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env,
                                                 jvmtiAllocTraceInfo** traces,
                                                 jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(
      env, traces, trace_counter_ptr);
}

void HeapMonitoring::get_garbage_traces(JvmtiEnv* env,
                                        jvmtiAllocTraceInfo** traces,
                                        jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_garbage_stack_traces(env,
                                                         traces,
                                                         trace_counter_ptr);
}

void HeapMonitoring::get_cached_traces(JvmtiEnv* env,
                                       jvmtiAllocTraceInfo** traces,
                                       jint* trace_counter_ptr) {
  StackTraceStorage::storage()->get_cached_stack_traces(env,
                                                        traces,
                                                        trace_counter_ptr);
}

// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  StackTraceStorage::storage()->weak_oops_do(is_alive, f);
}

void HeapMonitoring::initialize_profiling(jint monitoring_rate,
                                          jint max_gc_storage) {
  MutexLocker mu(HeapMonitor_lock);
  // Ignore if already enabled.
  if (enabled()) {
    return;
  }

  _monitoring_rate = monitoring_rate;

  // Populate the lookup table for fast_log2.
  // This approximates the log2 curve with a step function.
  // Steps have height equal to log2 of the mid-point of the step.
  for (int i = 0; i < (1 << FastLogNumBits); i++) {
    double half_way = static_cast<double>(i + 0.5);
    _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
  }
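  // For example, if FastLogNumBits were 10 (an assumption; the real value is
  // defined in the header), entry i would hold log2(1.0 + (i + 0.5) / 1024),
  // so fast_log2 is a step-function approximation of log2 that is exact at
  // each step's mid-point.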

  JavaThread* t = static_cast<JavaThread*>(Thread::current());
  _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
  if (_rnd == 0) {
    _rnd = 1;
  }

  StackTraceStorage::storage()->initialize(max_gc_storage);
  OrderAccess::release_store(&_enabled, 1);
}

void HeapMonitoring::stop_profiling() {
  MutexLocker mu(HeapMonitor_lock);

  if (enabled()) {
    StackTraceStorage::storage()->stop();
    OrderAccess::release_store(&_enabled, 0);
  }
}

// Generates a geometric variable with the specified mean (512K by default).
// This is done by generating a random number between 0 and 1 and applying
// the inverse cumulative distribution function for an exponential.
// Specifically: Let m be the inverse of the sample rate, then
// the probability distribution function is m*exp(-mx) so the CDF is
// p = 1 - exp(-mx), so
// q = 1 - p = exp(-mx)
// log_e(q) = -mx
// -log_e(q)/m = x
// log_2(q) * (-log_e(2) * 1/m) = x
// In the code, q is actually in the range 1 to 2**26, hence the -26 below
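// For example, with _monitoring_rate = 512K (the default mean) and a draw
// where log_2(q) - 26 = -1 (i.e. q = 2**25), the step works out to
// -(-1) * log_e(2) * 524288 + 1, roughly 363,000 bytes; averaged over all
// draws of q the steps have mean _monitoring_rate, as expected for a
// geometric sampling interval.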
void HeapMonitoring::pick_next_sample(size_t* ptr) {
  _rnd = next_random(_rnd);
  // Take the top 26 bits as the random number.
  // (This plus a 1<<58 sampling bound gives a max possible step of
  // 5194297183973780480 bytes.  In this case,
  // for sample_parameter = 1<<19, the max possible step is
  // 9448372 bytes (24 bits).)
  const uint64_t PrngModPower = 48;  // Number of bits in prng
  // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
  // under piii debug for some binaries.
  double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
  // Put the computed p-value through the CDF of a geometric.
  // For faster performance (saving ~1/20th of the execution time), replace
  // min(0.0, FastLog2(q) - 26)  by  (Fastlog2(q) - 26.000705).
  // The value 26.000705 is used rather than 26 to compensate
  // for inaccuracies in FastLog2 which otherwise result in a
  // negative answer.
  double log_val = (fast_log2(q) - 26);
  size_t rate = static_cast<size_t>(
      (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
  *ptr = rate;

  StackTraceStorage::storage()->accumulate_sample_rate(rate);
}

void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
  if (StackTraceStorage::storage()->initialized()) {
    assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
    JavaThread* thread = static_cast<JavaThread*>(t);

    jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
    if (trace == NULL) {
      return;
    }

    jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
    if (stack_info == NULL) {
      FREE_C_HEAP_OBJ(trace);
      return;
    }
    trace->stack_info = stack_info;

    jvmtiFrameInfo* frames =
        NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (frames == NULL) {
      FREE_C_HEAP_OBJ(stack_info);
      FREE_C_HEAP_OBJ(trace);
      return;
    }
    stack_info->frame_buffer = frames;
    stack_info->frame_count = 0;

    trace->thread_id = SharedRuntime::get_java_tid(thread);
    trace->size = byte_size;

    if (thread->has_last_Java_frame()) { // just to be safe
      vframeStream vfst(thread, true);
      int count = 0;
      while (!vfst.at_end() && count < MaxStackDepth) {
        Method* m = vfst.method();
        frames[count].location = vfst.bci();
        frames[count].method = m->jmethod_id();
        count++;

        vfst.next();
      }
      stack_info->frame_count = count;
    }

    if (stack_info->frame_count > 0) {
      // Success!
      StackTraceStorage::storage()->add_trace(trace, o);
      return;
    }

    // Failure!
    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
    FREE_C_HEAP_OBJ(stack_info);
    FREE_C_HEAP_OBJ(trace);
  }
}