
src/hotspot/share/runtime/heapMonitoring.cpp

rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48555 : [mq]: heap16
rev 48556 : [mq]: heap17
rev 48557 : [mq]: heap17
rev 48558 : [mq]: heap19
rev 48559 : [mq]: heap20
rev 48560 : [mq]: heap21
rev 48562 : [mq]: heap23


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"

  29 #include "runtime/heapMonitoring.hpp"
  30 #include "runtime/orderAccess.inline.hpp"
  31 #include "runtime/vframe.hpp"
  32 
  33 static const int MaxStackDepth = 1024;
  34 
  35 // Internal data structure representing traces, used when the object has been GC'd.
  36 class StackTraceData : public CHeapObj<mtInternal> {
  37  private:
  38   jvmtiStackTrace* _trace;
  39   int _references;
  40 
  41  public:
  42   StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {}
  43 
  44   void increment_reference_count() {
  45     _references++;
  46   }
  47 
  48   jvmtiStackTrace* get_trace() const {
  49     return _trace;
  50   }
  51 
  52   static void unreference_and_free(StackTraceData* data) {
  53     if (!data) {
  54       return;
  55     }
  56 
  57     data->_references--;
  58     if (data->_references == 0) {
  59       if (data->_trace != NULL) {
  60         FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames);


  61         FREE_C_HEAP_OBJ(data->_trace);
  62       }
  63       delete data;
  64     }
  65   }
  66 };
  67 
  68 // Internal data structure representing traces with the oop, used while the
  69 // object is live. Since this structure just passes the trace to the GC lists,
  70 // it does not handle any freeing.
  71 class StackTraceDataWithOop : public StackTraceData {
  72  private:
  73   oop _obj;
  74 
  75  public:
  76   StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
  77     store_oop(o);
  78   }
  79 
  80   StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  81   }
  82 
  83   oop load_oop() {
  84     return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_obj);
  85   }
  86 
  87   oop* get_oop_addr() {
  88     return &_obj;
  89   }
  90 
  91   void store_oop(oop value) {
  92     RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  93   }
  94 
  95   void clear_oop() {
  96     store_oop(reinterpret_cast<oop>(NULL));


 201 
 202   virtual bool select_replacement(uint32_t* index) {
 203     *index = _garbage_traces_pos;
 204 
 205     _garbage_traces_pos =
 206         (_garbage_traces_pos + 1) % _size;
 207 
 208     return true;
 209   }
 210 
 211  private:
 212   // The current position in the buffer.
 213   uint32_t _garbage_traces_pos;
 214 };
 215 
 216 // Each object that we profile is stored as a trace with the thread_id.
 217 class StackTraceStorage : public CHeapObj<mtInternal> {
 218  public:
 219   // The function that gets called to add a trace to the list of
 220   // traces we are maintaining.
 221   void add_trace(jvmtiStackTrace* trace, oop o);
 222 
 223   // The function that gets called by the client to retrieve the list
 224   // of stack traces. Passes a jvmtiStackTraces which will get mutated.
 225   void get_all_stack_traces(jvmtiStackTraces* traces);


 226 
 227   // The function that gets called by the client to retrieve the list
 228   // of stack traces. Passes a jvmtiStackTraces which will get mutated.
 229   void get_garbage_stack_traces(jvmtiStackTraces* traces);


 230 
 231   // The function that gets called by the client to retrieve the list
 232   // of stack traces. Passes a jvmtiStackTraces which will get mutated.
 233   void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);


 234 
 235   // The function that gets called by the client to retrieve the list
 236   // of stack traces. Passes a jvmtiStackTraces which will get mutated.
 237   void get_cached_stack_traces(jvmtiStackTraces* traces);


 238 
 239   // Executes whenever weak references are traversed.  is_alive tells
 240   // you if the given oop is still reachable and live.
 241   void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 242 
 243   ~StackTraceStorage();
 244   StackTraceStorage();
 245 
 246   static StackTraceStorage* storage() {
 247     static StackTraceStorage internal_storage;
 248     return &internal_storage;
 249   }
 250 
 251   void initialize(int max_storage) {
 252     MutexLocker mu(HeapMonitorStorage_lock);
 253     allocate_storage(max_storage);
 254   }
 255 
 256   void stop() {
 257     MutexLocker mu(HeapMonitorStorage_lock);
 258     free_storage();
 259   }
 260 
 261   const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
 262     MutexLocker mu(HeapMonitorStorage_lock);
 263     return _stats;
 264   }
 265 
 266   void accumulate_sample_rate(size_t rate) {
 267     MutexLocker mu(HeapMonitorStorage_lock);
 268     _stats.sample_rate_accumulation += rate;
 269     _stats.sample_rate_count++;
 270   }
 271 
 272   bool initialized() {
 273     return OrderAccess::load_acquire(&_initialized) != 0;

 274   }
 275 
 276  private:
 277   // The traces currently sampled.
 278   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 279 
 280   // The traces currently sampled.
 281   GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;
 282 
 283   // Recent garbage traces.
 284   MostRecentGarbageTraces* _recent_garbage_traces;
 285 
 286   // Frequent garbage traces.
 287   FrequentGarbageTraces* _frequent_garbage_traces;
 288 
 289   // Heap Sampling statistics.
 290   jvmtiHeapSamplingStats _stats;
 291 
 292   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 293   int _max_gc_storage;


 309         _data(data) {}
 310     int size() const { return _data ? _data->length() : 0; }
 311     const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }
 312 
 313    private:
 314     GrowableArray<StackTraceDataWithOop>* _data;
 315   };
 316 
 317   class GarbageStackTraceDataCopier : public StackTraceDataCopier {
 318    public:
 319     GarbageStackTraceDataCopier(StackTraceData** data, int size) :
 320         _data(data), _size(size) {}
 321     int size() const { return _size; }
 322     const StackTraceData* get(uint32_t i) const { return _data[i]; }
 323 
 324    private:
 325     StackTraceData** _data;
 326     int _size;
 327   };
 328 
 329   // Copies from StackTraceData to jvmtiStackTrace.
 330   bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);
 331 
 332   // Creates a deep copy of the list of StackTraceData.
 333   void copy_stack_traces(const StackTraceDataCopier &copier,
 334                          jvmtiStackTraces* traces);


 335 
 336   void store_garbage_trace(const StackTraceDataWithOop &trace);
 337 
 338   void free_garbage();
 339   void free_storage();
 340   void reset();
 341 
 342   void allocate_storage(int max_gc_storage);
 343 };
 344 
 345 StackTraceStorage* StackTraceStorage::internal_storage;
 346 
 347 // Statics for Sampler
 348 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 349 int HeapMonitoring::_enabled;
 350 jint HeapMonitoring::_monitoring_rate;
 351 
 352 // Cheap random number generator
 353 uint64_t HeapMonitoring::_rnd;
 354 
 355 StackTraceStorage::StackTraceStorage() {

 356   reset();
 357 }
 358 
 359 void StackTraceStorage::reset() {
 360   _allocated_traces = NULL;
 361   _traces_on_last_full_gc = NULL;
 362   _recent_garbage_traces = NULL;
 363   _frequent_garbage_traces = NULL;
 364   _max_gc_storage = 0;
 365   OrderAccess::release_store(&_initialized, 0);
 366 }
 367 
 368 void StackTraceStorage::free_garbage() {
 369   StackTraceData** recent_garbage = NULL;
 370   uint32_t recent_size = 0;
 371 
 372   StackTraceData** frequent_garbage = NULL;
 373   uint32_t frequent_size = 0;
 374 
 375   if (_recent_garbage_traces != NULL) {
 376     recent_garbage = _recent_garbage_traces->get_traces();
 377     recent_size = _recent_garbage_traces->size();
 378   }
 379 


 398   if (!initialized()) {
 399     return;
 400   }
 401 
 402   delete _allocated_traces;
 403   delete _traces_on_last_full_gc;
 404 
 405   free_garbage();
 406   delete _recent_garbage_traces;
 407   delete _frequent_garbage_traces;
 408 
 409   reset();
 410 }
 411 
 412 StackTraceStorage::~StackTraceStorage() {
 413   MutexLocker mu(HeapMonitorStorage_lock);
 414   free_storage();
 415 }
 416 
 417 void StackTraceStorage::allocate_storage(int max_gc_storage) {
 418   // In case multiple threads were blocked on the lock and then got through one by one.
 419   if (initialized()) {
 420     return;
 421   }
 422 
 423   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 424       GrowableArray<StackTraceDataWithOop>(128, true);
 425   _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
 426       GrowableArray<StackTraceDataWithOop>(128, true);
 427 
 428   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 429   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 430 
 431   _max_gc_storage = max_gc_storage;
 432   memset(&_stats, 0, sizeof(_stats));
 433   OrderAccess::release_store(&_initialized, 1);
 434 }
 435 
 436 void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
 437   MutexLocker mu(HeapMonitorStorage_lock);
 438   // Last-minute check on initialization, in case a stop() deleted the data
 439   // between the moment object_alloc_do_sample checked for initialization
 440   // and now.
 441   if (initialized()) {
 442     StackTraceDataWithOop new_data(trace, o);
 443     _stats.sample_count++;
 444     _stats.stack_depth_accumulation += trace->frame_count;
 445     _allocated_traces->append(new_data);
 446   }
 447 }
 448 
 449 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
 450                                      OopClosure* f) {
 451   size_t count = 0;
 452   if (initialized()) {
 453     int len = _allocated_traces->length();
 454 
 455     _traces_on_last_full_gc->clear();
 456 
 457     // Compact the oop traces.  Moves the live oops to the beginning of the
 458     // growable array, potentially overwriting the dead ones.
 459     for (int i = 0; i < len; i++) {
 460       StackTraceDataWithOop &trace = _allocated_traces->at(i);
 461       oop value = trace.load_oop();
 462       if (is_alive->do_object_b(value)) {
 463         // Update the oop to point to the new object if it is still alive.
 464         f->do_oop(trace.get_oop_addr());


 474         // If the old trace is no longer live, add it to the list of
 475         // recently collected garbage.
 476         store_garbage_trace(trace);
 477       }
 478     }
 479 
 480     // Zero out remaining array elements.  Even though the call to trunc_to
 481     // below truncates these values, zeroing them out is good practice.
 482     StackTraceDataWithOop zero_trace;
 483     for (int i = count; i < len; i++) {
 484       _allocated_traces->at_put(i, zero_trace);
 485     }
 486 
 487     // Set the array's length to the number of live elements.
 488     _allocated_traces->trunc_to(count);
 489   }
 490 
 491   log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count);
 492 }
 493 
 494 bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
 495                                   const StackTraceData* from) {
 496   const jvmtiStackTrace* src = from->get_trace();
 497   *to = *src;
 498 
 499   to->frames =
 500       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
 501 
 502   if (to->frames == NULL) {
 503     return false;
 504   }
 505 
 506   memcpy(to->frames,
 507          src->frames,
 508          sizeof(jvmtiFrameInfo) * src->frame_count);
 509   return true;
 510 }
 511 
 512 // Called by the outside world; returns a copy of the stack traces
 513 // (because we could be replacing them as the user handles them).
 514 // The array is secretly null-terminated (to make it easier to reclaim).
 515 void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {


 516   MutexLocker mu(HeapMonitorStorage_lock);
 517   if (!_allocated_traces) {
 518     traces->stack_traces = NULL;
 519     traces->trace_count = 0;
 520     return;
 521   }
 522 
 523   LiveStackTraceDataCopier copier(_allocated_traces);
 524   copy_stack_traces(copier, traces);
 525 }
 526 
 527 // See comment on get_all_stack_traces
 528 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {


 529   MutexLocker mu(HeapMonitorStorage_lock);
 530   if (!_recent_garbage_traces) {
 531     traces->stack_traces = NULL;
 532     traces->trace_count = 0;
 533     return;
 534   }
 535 
 536   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
 537                                      _recent_garbage_traces->size());
 538   copy_stack_traces(copier, traces);
 539 }
 540 
 541 // See comment on get_all_stack_traces
 542 void StackTraceStorage::get_frequent_garbage_stack_traces(
 543     jvmtiStackTraces* traces) {
 544   MutexLocker mu(HeapMonitorStorage_lock);
 545   if (!_frequent_garbage_traces) {
 546     traces->stack_traces = NULL;
 547     traces->trace_count = 0;
 548     return;
 549   }
 550 
 551   GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
 552                                      _frequent_garbage_traces->size());
 553   copy_stack_traces(copier, traces);
 554 }
 555 
 556 // See comment on get_all_stack_traces
 557 void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {


 558   MutexLocker mu(HeapMonitorStorage_lock);
 559   if (!_traces_on_last_full_gc) {
 560     traces->stack_traces = NULL;
 561     traces->trace_count = 0;
 562     return;
 563   }
 564 
 565   LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
 566   copy_stack_traces(copier, traces);
 567 }
 568 
 569 void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
 570                                           jvmtiStackTraces* traces) {
 571   int len = copier.size();
 572 
 573   // Create a new array to store the StackTraceData objects.
 574   // + 1 for a NULL at the end.
 575   jvmtiStackTrace* t =
 576       NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
 577   if (t == NULL) {
 578     traces->stack_traces = NULL;
 579     traces->trace_count = 0;
 580     return;
 581   }
 582   // +1 to have a NULL at the end of the array.
 583   memset(t, 0, (len + 1) * sizeof(*t));
 584 
 585   // Copy the StackTraceData objects into the new array.
 586   int trace_count = 0;
 587   for (int i = 0; i < len; i++) {
 588     const StackTraceData* stack_trace = copier.get(i);
 589     if (stack_trace != NULL) {
 590       jvmtiStackTrace* to = &t[trace_count];
 591       if (!deep_copy(to, stack_trace)) {
 592         continue;
 593       }
 594       trace_count++;
 595     }
 596   }
 597 
 598   traces->stack_traces = t;
 599   traces->trace_count = trace_count;
 600 }
 601 
 602 void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
 603   StackTraceData* new_trace = new StackTraceData(trace.get_trace());
 604 
 605   bool accepted = _recent_garbage_traces->store_trace(new_trace);
 606 
 607   // accepted is on the right of the || so that store_trace is always evaluated.
 608   accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;
 609 
 610   if (!accepted) {
 611     // No one wanted to use it.
 612     delete new_trace;
 613   }
 614 
 615   _stats.garbage_collected_samples++;
 616 }
 617 
 618 void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
 619   StackTraceStorage::storage()->get_all_stack_traces(traces);
 620 }
 621 
 622 void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
 623   const jvmtiHeapSamplingStats& internal_stats =
 624       StackTraceStorage::storage()->get_heap_sampling_stats();
 625   *stats = internal_stats;
 626 }
 627 
 628 void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
 629   StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
 630 }
 631 
 632 void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
 633   StackTraceStorage::storage()->get_garbage_stack_traces(traces);
 634 }
 635 
 636 void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
 637   StackTraceStorage::storage()->get_cached_stack_traces(traces);
 638 }
 639 
 640 void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
 641   jint trace_count = traces->trace_count;
 642   jvmtiStackTrace* stack_traces = traces->stack_traces;
 643 
 644   for (jint i = 0; i < trace_count; i++) {
 645     jvmtiStackTrace* current_trace = stack_traces + i;
 646     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
 647   }
 648 
 649   FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
 650   traces->trace_count = 0;
 651   traces->stack_traces = NULL;
 652 }
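The getter/release pair above is meant to be used together: every jvmtiStackTraces filled in by one of the get_*_traces entry points owns C-heap copies of the frame arrays, and release_traces is the only correct way to reclaim them. A minimal VM-internal sketch of the pairing (variable names are illustrative, not from this file):

  jvmtiStackTraces traces;
  HeapMonitoring::get_live_traces(&traces);
  for (jint i = 0; i < traces.trace_count; i++) {
    const jvmtiStackTrace* t = &traces.stack_traces[i];
    // inspect t->thread_id, t->size, t->frame_count and t->frames here
  }
  HeapMonitoring::release_traces(&traces);  // frees each frames array, then the trace array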
 653 
 654 // Invoked by the GC to clean up old stack traces and remove old arrays
 655 // of instrumentation that are still lying around.
 656 void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 657   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 658   StackTraceStorage::storage()->weak_oops_do(is_alive, f);
 659 }
 660 
 661 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
 662                                           jint max_gc_storage) {
 663   MutexLocker mu(HeapMonitor_lock);
 664   // Ignore if already enabled.
 665   if (enabled()) {
 666     return;
 667   }
 668 
 669   _monitoring_rate = monitoring_rate;
 670 
 671   // Populate the lookup table for fast_log2.


 714   // for sample_parameter = 1<<19, max possible step is
 715   // 9448372 bytes (24 bits).
 716   const uint64_t PrngModPower = 48;  // Number of bits in prng
 717   // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
 718   // under piii debug for some binaries.
 719   double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
 720   // Put the computed p-value through the CDF of a geometric.
 721   // For faster performance (save ~1/20th exec time), replace
 722   // min(0.0, FastLog2(q) - 26)  by  (FastLog2(q) - 26.000705)
 723   // The value 26.000705 is used rather than 26 to compensate
 724   // for inaccuracies in FastLog2 which otherwise result in a
 725   // negative answer.
 726   double log_val = (fast_log2(q) - 26);
 727   size_t rate = static_cast<size_t>(
 728       (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
 729   *ptr = rate;
 730 
 731   StackTraceStorage::storage()->accumulate_sample_rate(rate);
 732 }
 733 
 734 void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
 736   if (StackTraceStorage::storage()->initialized()) {
 737     assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
 738     JavaThread* thread = static_cast<JavaThread*>(t);
 739 
 740     jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
 741     if (trace == NULL) {
 742       return;
 743     }
 744 
 745     jvmtiFrameInfo* frames =
 746         NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
 747 
 748     if (frames == NULL) {
 749       FREE_C_HEAP_OBJ(trace);
 750       return;
 751     }
 752 
 753     trace->frames = frames;
 754     trace->thread_id = SharedRuntime::get_java_tid(thread);
 755     trace->size = byte_size;
 756     trace->frame_count = 0;
 757 
 758     if (thread->has_last_Java_frame()) { // just to be safe
 759       vframeStream vfst(thread, true);
 760       int count = 0;
 761       while (!vfst.at_end() && count < MaxStackDepth) {
 762         Method* m = vfst.method();
 763         frames[count].location = vfst.bci();
 764         frames[count].method = m->jmethod_id();
 765         count++;
 766 
 767         vfst.next();
 768       }
 769       trace->frame_count = count;
 770     }
 771 
 772     if (trace->frame_count > 0) {
 773       // Success!
 774       StackTraceStorage::storage()->add_trace(trace, o);
 775       return;
 776     }
 777 
 778     // Failure!
 779     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
 780     FREE_C_HEAP_OBJ(trace);
 781   }
 782 }


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "prims/jvmtiEnvBase.hpp"
  30 #include "runtime/heapMonitoring.hpp"
  31 #include "runtime/orderAccess.inline.hpp"
  32 #include "runtime/vframe.hpp"
  33 
  34 static const int MaxStackDepth = 1024;
  35 
  36 // Internal data structure representing traces, used when the object has been GC'd.
  37 class StackTraceData : public CHeapObj<mtInternal> {
  38  private:
  39   jvmtiAllocTraceInfo* _trace;
  40   int _references;
  41 
  42  public:
  43   StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}
  44 
  45   void increment_reference_count() {
  46     _references++;
  47   }
  48 
  49   jvmtiAllocTraceInfo* get_trace() const {
  50     return _trace;
  51   }
  52 
  53   static void unreference_and_free(StackTraceData* data) {
  54     if (!data) {
  55       return;
  56     }
  57 
  58     data->_references--;
  59     if (data->_references == 0) {
  60       if (data->_trace != NULL) {
  61         jvmtiStackInfo* stack_info = data->_trace->stack_info;
  62         FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
  63         FREE_C_HEAP_OBJ(stack_info);
  64         FREE_C_HEAP_OBJ(data->_trace);
  65       }
  66       delete data;
  67     }
  68   }
  69 };
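The reference count above exists because the same StackTraceData can end up held by both the most-recent and the frequent garbage buffers at once (see store_garbage_trace further down). A minimal sketch of the intended protocol, assuming each buffer's store_trace calls increment_reference_count() when it keeps the pointer (that buffer code is elided from this page):

  StackTraceData* data = new StackTraceData(trace);
  data->increment_reference_count();           // kept by the recent-garbage buffer
  data->increment_reference_count();           // kept by the frequent-garbage buffer as well
  StackTraceData::unreference_and_free(data);  // count drops to 1, nothing is freed yet
  StackTraceData::unreference_and_free(data);  // count drops to 0: frame_buffer, stack_info,
                                               // the trace and data itself are freed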
  70 
  71 // Internal data structure representing traces with the oop, used while the
  72 // object is live. Since this structure just passes the trace to the GC lists,
  73 // it does not handle any freeing.
  74 class StackTraceDataWithOop : public StackTraceData {
  75  private:
  76   oop _obj;
  77 
  78  public:
  79   StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
  80     store_oop(o);
  81   }
  82 
  83   StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
  84   }
  85 
  86   oop load_oop() {
  87     return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_obj);
  88   }
  89 
  90   oop* get_oop_addr() {
  91     return &_obj;
  92   }
  93 
  94   void store_oop(oop value) {
  95     RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&_obj, value);
  96   }
  97 
  98   void clear_oop() {
  99     store_oop(reinterpret_cast<oop>(NULL));


 204 
 205   virtual bool select_replacement(uint32_t* index) {
 206     *index = _garbage_traces_pos;
 207 
 208     _garbage_traces_pos =
 209         (_garbage_traces_pos + 1) % _size;
 210 
 211     return true;
 212   }
 213 
 214  private:
 215   // The current position in the buffer.
 216   uint32_t _garbage_traces_pos;
 217 };
 218 
 219 // Each object that we profile is stored as a trace with the thread_id.
 220 class StackTraceStorage : public CHeapObj<mtInternal> {
 221  public:
 222   // The function that gets called to add a trace to the list of
 223   // traces we are maintaining.
 224   void add_trace(jvmtiAllocTraceInfo* trace, oop o);
 225 
 226   // The function that gets called by the client to retrieve the list
 227   // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
 228   void get_all_stack_traces(JvmtiEnv* env,
 229                             jvmtiAllocTraceInfo** traces,
 230                             jint* trace_counter_ptr);
 231 
 232   // The function that gets called by the client to retrieve the list
 233   // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
 234   void get_garbage_stack_traces(JvmtiEnv* env,
 235                                 jvmtiAllocTraceInfo** traces,
 236                                 jint* trace_counter_ptr);
 237 
 238   // The function that gets called by the client to retrieve the list
 239   // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
 240   void get_frequent_garbage_stack_traces(JvmtiEnv* env,
 241                                          jvmtiAllocTraceInfo** traces,
 242                                          jint* trace_counter_ptr);
 243 
 244   // The function that gets called by the client to retrieve the list
 245   // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
 246   void get_cached_stack_traces(JvmtiEnv* env,
 247                                jvmtiAllocTraceInfo** traces,
 248                                jint* trace_counter_ptr);
 249 
 250   // Executes whenever weak references are traversed.  is_alive tells
 251   // you if the given oop is still reachable and live.
 252   void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 253 
 254   ~StackTraceStorage();
 255   StackTraceStorage();
 256 
 257   static StackTraceStorage* storage() {
 258     static StackTraceStorage internal_storage;
 259     return &internal_storage;
 260   }
 261 
 262   void initialize(int max_storage) {
 263     MutexLocker mu(HeapMonitorStorage_lock);
 264     allocate_storage(max_storage);
 265   }
 266 
 267   void stop() {
 268     MutexLocker mu(HeapMonitorStorage_lock);
 269     free_storage();
 270   }
 271 
 272   const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
 273     MutexLocker mu(HeapMonitorStorage_lock);
 274     return _stats;
 275   }
 276 
 277   void accumulate_sample_rate(size_t rate) {
 278     MutexLocker mu(HeapMonitorStorage_lock);
 279     _stats.sample_rate_accumulation += rate;
 280     _stats.sample_rate_count++;
 281   }
 282 
 283   bool initialized() {
 284     return OrderAccess::load_acquire(&_initialized) != 0;
 286   }
 287 
 288  private:
 289   // The traces currently sampled.
 290   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 291 
 292   // The traces currently sampled.
 293   GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;
 294 
 295   // Recent garbage traces.
 296   MostRecentGarbageTraces* _recent_garbage_traces;
 297 
 298   // Frequent garbage traces.
 299   FrequentGarbageTraces* _frequent_garbage_traces;
 300 
 301   // Heap Sampling statistics.
 302   jvmtiHeapSamplingStats _stats;
 303 
 304   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 305   int _max_gc_storage;


 321         _data(data) {}
 322     int size() const { return _data ? _data->length() : 0; }
 323     const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }
 324 
 325    private:
 326     GrowableArray<StackTraceDataWithOop>* _data;
 327   };
 328 
 329   class GarbageStackTraceDataCopier : public StackTraceDataCopier {
 330    public:
 331     GarbageStackTraceDataCopier(StackTraceData** data, int size) :
 332         _data(data), _size(size) {}
 333     int size() const { return _size; }
 334     const StackTraceData* get(uint32_t i) const { return _data[i]; }
 335 
 336    private:
 337     StackTraceData** _data;
 338     int _size;
 339   };
 340 
 341   // Creates a deep copy of the list of StackTraceData.
 342   void copy_stack_traces(JvmtiEnv* env,
 343                          const StackTraceDataCopier &copier,
 344                          jvmtiAllocTraceInfo** traces,
 345                          jint* trace_counter_ptr);
 346 
 347   void store_garbage_trace(const StackTraceDataWithOop &trace);
 348 
 349   void free_garbage();
 350   void free_storage();
 351   void reset();
 352 
 353   void allocate_storage(int max_gc_storage);
 354 
 355   int calculate_frame_count(const StackTraceDataCopier &copier);
 356   int calculate_info_count(const StackTraceDataCopier &copier);
 357 
 358   bool copy_frame(const StackTraceData* stack_trace_data,
 359                   jvmtiAllocTraceInfo* current_alloc_traces,
 360                   jvmtiStackInfo* current_stack_info,
 361                   jvmtiFrameInfo* current_frame_info);
 362 
 363   // Returns whether the frame copy succeeded. Failure can occur when there is
 364   // no longer enough memory.
 365   bool copy_frames(const StackTraceDataCopier& copier, int info_count,
 366                    unsigned char* start,
 367                    unsigned char* end);
 368 };
 369 
 370 StackTraceStorage* StackTraceStorage::internal_storage;
 371 
 372 // Statics for Sampler
 373 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 374 int HeapMonitoring::_enabled;
 375 jint HeapMonitoring::_monitoring_rate;
 376 
 377 // Cheap random number generator
 378 uint64_t HeapMonitoring::_rnd;
 379 
 380 StackTraceStorage::StackTraceStorage() {
 381   MutexLocker mu(HeapMonitorStorage_lock);
 382   reset();
 383 }
 384 
 385 void StackTraceStorage::reset() {
 386   assert(HeapMonitorStorage_lock->owned_by_self()
 387          || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
 388          "This should not be accessed concurrently");
 389 
 390   _allocated_traces = NULL;
 391   _traces_on_last_full_gc = NULL;
 392   _recent_garbage_traces = NULL;
 393   _frequent_garbage_traces = NULL;
 394   _max_gc_storage = 0;
 395   OrderAccess::release_store(&_initialized, 0);
 396 }
 397 
 398 void StackTraceStorage::free_garbage() {
 399   StackTraceData** recent_garbage = NULL;
 400   uint32_t recent_size = 0;
 401 
 402   StackTraceData** frequent_garbage = NULL;
 403   uint32_t frequent_size = 0;
 404 
 405   if (_recent_garbage_traces != NULL) {
 406     recent_garbage = _recent_garbage_traces->get_traces();
 407     recent_size = _recent_garbage_traces->size();
 408   }
 409 


 428   if (!initialized()) {
 429     return;
 430   }
 431 
 432   delete _allocated_traces;
 433   delete _traces_on_last_full_gc;
 434 
 435   free_garbage();
 436   delete _recent_garbage_traces;
 437   delete _frequent_garbage_traces;
 438 
 439   reset();
 440 }
 441 
 442 StackTraceStorage::~StackTraceStorage() {
 443   MutexLocker mu(HeapMonitorStorage_lock);
 444   free_storage();
 445 }
 446 
 447 void StackTraceStorage::allocate_storage(int max_gc_storage) {
 448   assert(HeapMonitorStorage_lock->owned_by_self()
 449          || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
 450          "This should not be accessed concurrently");
 451 
 452   // In case multiple threads were blocked on the lock and then got through one by one.
 453   if (initialized()) {
 454     return;
 455   }
 456 
 457   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 458       GrowableArray<StackTraceDataWithOop>(128, true);
 459   _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
 460       GrowableArray<StackTraceDataWithOop>(128, true);
 461 
 462   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 463   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 464 
 465   _max_gc_storage = max_gc_storage;
 466   memset(&_stats, 0, sizeof(_stats));
 467   OrderAccess::release_store(&_initialized, 1);
 468 }
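The release_store above pairs with the load_acquire in initialized(): a sampling thread that observes _initialized == 1 is then guaranteed to also observe the fully constructed arrays written just before the store. A self-contained sketch of the same publication pattern using standard C++ atomics (illustrative only, not HotSpot's OrderAccess API):

  #include <atomic>

  static std::atomic<int> initialized{0};
  static int* storage = nullptr;

  void init() {
    storage = new int[128];                            // build the data first...
    initialized.store(1, std::memory_order_release);   // ...then publish it
  }

  bool ready() {
    // An acquire load that returns 1 also makes the published 'storage' visible.
    return initialized.load(std::memory_order_acquire) != 0;
  }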
 469 
 470 void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
 471   MutexLocker mu(HeapMonitorStorage_lock);
 472   // Last-minute check on initialization, in case a stop() deleted the data
 473   // between the moment object_alloc_do_sample checked for initialization
 474   // and now.
 475   if (initialized()) {
 476     StackTraceDataWithOop new_data(trace, o);
 477     _stats.sample_count++;
 478     _stats.stack_depth_accumulation += trace->stack_info->frame_count;
 479     _allocated_traces->append(new_data);
 480   }
 481 }
 482 
 483 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
 484                                      OopClosure* f) {
 485   size_t count = 0;
 486   if (initialized()) {
 487     int len = _allocated_traces->length();
 488 
 489     _traces_on_last_full_gc->clear();
 490 
 491     // Compact the oop traces.  Moves the live oops to the beginning of the
 492     // growable array, potentially overwriting the dead ones.
 493     for (int i = 0; i < len; i++) {
 494       StackTraceDataWithOop &trace = _allocated_traces->at(i);
 495       oop value = trace.load_oop();
 496       if (is_alive->do_object_b(value)) {
 497         // Update the oop to point to the new object if it is still alive.
 498         f->do_oop(trace.get_oop_addr());


 508         // If the old trace is no longer live, add it to the list of
 509         // recently collected garbage.
 510         store_garbage_trace(trace);
 511       }
 512     }
 513 
 514     // Zero out remaining array elements.  Even though the call to trunc_to
 515     // below truncates these values, zeroing them out is good practice.
 516     StackTraceDataWithOop zero_trace;
 517     for (int i = count; i < len; i++) {
 518       _allocated_traces->at_put(i, zero_trace);
 519     }
 520 
 521     // Set the array's length to the number of live elements.
 522     _allocated_traces->trunc_to(count);
 523   }
 524 
 525   log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count);
 526 }
 527 
 528 // Called by the outside world; returns a copy of the stack traces
 529 // (because we could be replacing them as the user handles them).
 530 // The traces are returned in a single block allocated through the JvmtiEnv.
 531 void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env,
 532                                              jvmtiAllocTraceInfo** traces,
 533                                              jint* trace_counter_ptr) {
 534   MutexLocker mu(HeapMonitorStorage_lock);
 535   if (!_allocated_traces) {
 536     *traces = NULL;
 537     *trace_counter_ptr = 0;
 538     return;
 539   }
 540 
 541   LiveStackTraceDataCopier copier(_allocated_traces);
 542   copy_stack_traces(env, copier, traces, trace_counter_ptr);
 543 }
 544 
 545 // See comment on get_all_stack_traces
 546 void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env,
 547                                                  jvmtiAllocTraceInfo** traces,
 548                                                  jint* trace_counter_ptr) {
 549   MutexLocker mu(HeapMonitorStorage_lock);
 550   if (!_recent_garbage_traces) {
 551     *traces = NULL;
 552     *trace_counter_ptr = 0;
 553     return;
 554   }
 555 
 556   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
 557                                      _recent_garbage_traces->size());
 558   copy_stack_traces(env, copier, traces, trace_counter_ptr);
 559 }
 560 
 561 // See comment on get_all_stack_traces
 562 void StackTraceStorage::get_frequent_garbage_stack_traces(
 563     JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
 564   MutexLocker mu(HeapMonitorStorage_lock);
 565   if (!_frequent_garbage_traces) {
 566     *traces = NULL;
 567     *trace_counter_ptr = 0;
 568     return;
 569   }
 570 
 571   GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
 572                                      _frequent_garbage_traces->size());
 573   copy_stack_traces(env, copier, traces, trace_counter_ptr);
 574 }
 575 
 576 // See comment on get_all_stack_traces
 577 void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env,
 578                                                 jvmtiAllocTraceInfo** traces,
 579                                                 jint* trace_counter_ptr) {
 580   MutexLocker mu(HeapMonitorStorage_lock);
 581   if (!_traces_on_last_full_gc) {
 582     *traces = NULL;
 583     *trace_counter_ptr = 0;
 584     return;
 585   }
 586 
 587   LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
 588   copy_stack_traces(env, copier, traces, trace_counter_ptr);
 589 }
 590 
 591 int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {

 592   int len = copier.size();
 593 
 594   // Walk the traces first to find the size of the frames as well.
 595   int frame_total = 0;
 596 
 597   for (int i = 0; i < len; i++) {
 598     const StackTraceData* stack_trace = copier.get(i);
 599 
 600     if (stack_trace != NULL) {
 601       jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
 602       jvmtiStackInfo* stack_info = trace->stack_info;
 603       frame_total += stack_info->frame_count;
 604     }
 605   }


 606 
 607   return frame_total;
 608 }
 609 
 610 int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
 611   int len = copier.size();
 612 
 613   int info_total = 0;
 614 
 615   for (int i = 0; i < len; i++) {
 616     const StackTraceData* stack_trace = copier.get(i);
 617 
 618     if (stack_trace != NULL) {
 619       // TODO: merge this with the method above.
 620       info_total++;
 621     }
 622   }
 623 
 624   return info_total;
 625 }
 626 
 627 // Method to test if the data structure would fit between the src address and
 628 // the end address.
 629 template<typename T, typename U>
 630 static bool next_ptr_less_or_equal(T src, U* end) {
 631   return (src + 1) <= reinterpret_cast<T>(end);
 632 }
 633 
 634 bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
 635                                    jvmtiAllocTraceInfo* current_alloc_trace,
 636                                    jvmtiStackInfo* current_stack_info,
 637                                    jvmtiFrameInfo* current_frame_info) {
 638   jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
 639   jvmtiStackInfo* stack_info = trace->stack_info;
 640   int frame_count = stack_info->frame_count;
 641 
 642   memcpy(current_alloc_trace, trace, sizeof(*trace));
 643 
 644   current_alloc_trace->stack_info = current_stack_info;
 645   memcpy(current_stack_info, stack_info, sizeof(*stack_info));
 646 
 647   current_stack_info->frame_buffer = current_frame_info;
 648   memcpy(current_frame_info, stack_info->frame_buffer,
 649          sizeof(jvmtiFrameInfo) * frame_count);
 650   return true;
 651 }
 652 
 653 bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
 654                                     int info_count,
 655                                     unsigned char* start,
 656                                     unsigned char* end) {
 657   jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast<jvmtiAllocTraceInfo*>(start);
 658   jvmtiStackInfo* start_stack_info = reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
 659   jvmtiFrameInfo* start_frame_info = reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);
 660 
 661   jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
 662   jvmtiStackInfo* current_stack_info = start_stack_info;
 663   jvmtiFrameInfo* current_frame_info = start_frame_info;
 664 
 665   for (int i = 0; i < info_count; i++) {
 666     assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
 667            "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
 668     assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
 669            "jvmtiStackInfo would write over jvmtiFrameInfos.");
 670 
 671     assert(next_ptr_less_or_equal(current_frame_info, end),
 672            "jvmtiFrameInfo would write over the end of the buffer.");
 673 
 674     const StackTraceData* stack_trace_data = copier.get(i);
 675     if (stack_trace_data != NULL) {
 676       if (!copy_frame(stack_trace_data, current_alloc_trace,
 677                       current_stack_info, current_frame_info)) {
 678         return false;
 679       }
 680 
 681       current_frame_info += current_stack_info->frame_count;
 682       current_stack_info++;
 683       current_alloc_trace++;
 684     }
 685   }
 686 
 687   return true;
 688 }
 689 
 690 void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
 691                                           const StackTraceDataCopier& copier,
 692                                           jvmtiAllocTraceInfo** traces,
 693                                           jint* trace_counter_ptr) {
 694   *traces = NULL;
 695   *trace_counter_ptr = 0;
 696 
 697   int frame_total = calculate_frame_count(copier);
 698   int len = calculate_info_count(copier);
 699 
 700   // Allocate all the stack traces in one block to simplify freeing.
 701   size_t total_size = len * sizeof(jvmtiAllocTraceInfo)
 702       + len * sizeof(jvmtiStackInfo)
 703       + frame_total * sizeof(jvmtiFrameInfo);
 704 
 705   unsigned char* buffer = NULL;
 706   jvmtiAllocTraceInfo* result = NULL;
 707   JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
 708   env_base->allocate(total_size, &buffer);
 709 
 710   if (buffer == NULL) {
 711     return;
 712   }
 713 
 714   bool success = copy_frames(copier, len, buffer, buffer + total_size);
 715 
 716   if (!success) {
 717     env_base->deallocate(buffer);
 718     return;
 719   }
 720 
 721   *trace_counter_ptr = len;
 722   *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
 723 }
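Because copy_stack_traces lays the jvmtiAllocTraceInfo array, the jvmtiStackInfo array and all jvmtiFrameInfo records out in a single buffer obtained from JvmtiEnv::allocate, the caller can release everything with one JVMTI Deallocate call. A hedged agent-side sketch (the extension function used to obtain the traces is not defined in this file and is purely illustrative):

  jvmtiAllocTraceInfo* traces = NULL;
  jint trace_count = 0;
  // ... obtain traces and trace_count through the corresponding JVMTI extension function ...
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(traces));  // frees traces, stack infos and frames in one call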
 724 
 725 void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
 726   StackTraceData* new_trace = new StackTraceData(trace.get_trace());
 727 
 728   bool accepted = _recent_garbage_traces->store_trace(new_trace);
 729 
 730   // accepted is on the right of the || so that store_trace is always evaluated.
 731   accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted;
 732 
 733   if (!accepted) {
 734     // No one wanted to use it.
 735     delete new_trace;
 736   }
 737 
 738   _stats.garbage_collected_samples++;
 739 }
 740 
 741 void HeapMonitoring::get_live_traces(JvmtiEnv* env,
 742                                      jvmtiAllocTraceInfo** traces,
 743                                      jint* trace_counter_ptr) {
 744   StackTraceStorage::storage()->get_all_stack_traces(env,
 745                                                      traces,
 746                                                      trace_counter_ptr);
 747 }
 748 
 749 void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
 750   const jvmtiHeapSamplingStats& internal_stats =
 751       StackTraceStorage::storage()->get_heap_sampling_stats();
 752   *stats = internal_stats;
 753 }
 754 
 755 void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env,
 756                                                  jvmtiAllocTraceInfo** traces,
 757                                                  jint* trace_counter_ptr) {
 758   StackTraceStorage::storage()->get_frequent_garbage_stack_traces(
 759       env, traces, trace_counter_ptr);
 760 }
 761 
 762 void HeapMonitoring::get_garbage_traces(JvmtiEnv* env,
 763                                         jvmtiAllocTraceInfo** traces,
 764                                         jint* trace_counter_ptr) {
 765   StackTraceStorage::storage()->get_garbage_stack_traces(env,
 766                                                          traces,
 767                                                          trace_counter_ptr);
 768 }
 769 
 770 void HeapMonitoring::get_cached_traces(JvmtiEnv* env,
 771                                        jvmtiAllocTraceInfo** traces,
 772                                        jint* trace_counter_ptr) {
 773   StackTraceStorage::storage()->get_cached_stack_traces(env,
 774                                                         traces,
 775                                                         trace_counter_ptr);
 776 }
 777 
 778 // Invoked by the GC to clean up old stack traces and remove old arrays
 779 // of instrumentation that are still lying around.
 780 void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 781   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 782   StackTraceStorage::storage()->weak_oops_do(is_alive, f);
 783 }
 784 
 785 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
 786                                           jint max_gc_storage) {
 787   MutexLocker mu(HeapMonitor_lock);
 788   // Ignore if already enabled.
 789   if (enabled()) {
 790     return;
 791   }
 792 
 793   _monitoring_rate = monitoring_rate;
 794 
 795   // Populate the lookup table for fast_log2.


 838   // for sample_parameter = 1<<19, max possible step is
 839   // 9448372 bytes (24 bits).
 840   const uint64_t PrngModPower = 48;  // Number of bits in prng
 841   // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
 842   // under piii debug for some binaries.
 843   double q = static_cast<uint32_t>(_rnd >> (PrngModPower - 26)) + 1.0;
 844   // Put the computed p-value through the CDF of a geometric.
 845   // For faster performance (save ~1/20th exec time), replace
 846   // min(0.0, FastLog2(q) - 26)  by  (FastLog2(q) - 26.000705)
 847   // The value 26.000705 is used rather than 26 to compensate
 848   // for inaccuracies in FastLog2 which otherwise result in a
 849   // negative answer.
 850   double log_val = (fast_log2(q) - 26);
 851   size_t rate = static_cast<size_t>(
 852       (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
 853   *ptr = rate;
 854 
 855   StackTraceStorage::storage()->accumulate_sample_rate(rate);
 856 }
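In plainer terms, the code above draws the next sampling interval from (approximately) an exponential distribution with mean _monitoring_rate: q / 2^26 plays the role of a uniform variate u in (0, 1], fast_log2(q) - 26 approximates log2(u), and multiplying by -log(2) * _monitoring_rate turns that into -ln(u) * _monitoring_rate. A self-contained sketch of the same computation without the fast_log2 approximation (names are illustrative):

  #include <cmath>
  #include <cstddef>

  // Next sampling interval in bytes, with mean 'mean_rate'; 'u' must be uniform in (0, 1].
  static size_t next_sample_interval(size_t mean_rate, double u) {
    // -ln(u) is a standard exponential variate; scaling by mean_rate gives the requested
    // mean, and the +1 guarantees the interval is never zero.
    return static_cast<size_t>(-std::log(u) * mean_rate) + 1;
  }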
 857 
 858 void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
 860   if (StackTraceStorage::storage()->initialized()) {
 861     assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
 862     JavaThread* thread = static_cast<JavaThread*>(t);
 863 
 864     jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
 865     if (trace == NULL) {
 866       return;
 867     }
 868 
 869     jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
 870     if (stack_info == NULL) {
 871       FREE_C_HEAP_OBJ(trace);
 872       return;
 873     }
 874     trace->stack_info = stack_info;
 875 
 876     jvmtiFrameInfo* frames =
 877         NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
 878 
 879     if (frames == NULL) {
 880       FREE_C_HEAP_OBJ(stack_info);
 881       FREE_C_HEAP_OBJ(trace);
 882       return;
 883     }
 884     stack_info->frame_buffer = frames;
 885     stack_info->frame_count = 0;
 886 

 887     trace->thread_id = SharedRuntime::get_java_tid(thread);
 888     trace->size = byte_size;

 889 
 890     if (thread->has_last_Java_frame()) { // just to be safe
 891       vframeStream vfst(thread, true);
 892       int count = 0;
 893       while (!vfst.at_end() && count < MaxStackDepth) {
 894         Method* m = vfst.method();
 895         frames[count].location = vfst.bci();
 896         frames[count].method = m->jmethod_id();
 897         count++;
 898 
 899         vfst.next();
 900       }
 901       stack_info->frame_count = count;
 902     }
 903 
 904     if (stack_info->frame_count > 0) {
 905       // Success!
 906       StackTraceStorage::storage()->add_trace(trace, o);
 907       return;
 908     }
 909 
 910     // Failure!
 911     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
 912     FREE_C_HEAP_OBJ(stack_info);
 913     FREE_C_HEAP_OBJ(trace);
 914   }
 915 }