
src/hotspot/share/runtime/heapMonitoring.cpp

rev 47223 : [mq]: heapz8
rev 47224 : [mq]: heap9a
rev 47225 : [mq]: heap10


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "runtime/heapMonitoring.hpp"
  30 #include "runtime/vframe.hpp"
  31 
  32 // DONE:
  33 //  merged printouts
  34 //  broke up the one-liner
  35 //  documented synchronization
  36 //  cleaned up old entry points for C1/interpreter
  37 //  added statistics per GC and logged start-up initialization
  38 //  removed the null pointer check during the weak_oops_do walk
  39 //  cleaned up the task_executor
  40 //  fixed the compilation using the option --disable-precompiled-header
  41 
  42 static const int MaxStackDepth = 64;
  43 
  44 // Internal data structure representing traces.
  45 struct StackTraceData : CHeapObj<mtInternal> {
  46   jvmtiStackTrace *trace;
  47   oop obj;
  48   int references;
  49 
  50   StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}
  51 
  52   StackTraceData() : trace(NULL), obj(NULL), references(0) {}
  53 
  54   // StackTraceData instances are shared between various lists, so freeing
  55   // is handled by hand instead of in the destructor: there are cases where
  56   // the struct lives on the stack while holding heap data that must not be
  57   // freed.
  58   static void free_data(StackTraceData *data) {
  59     if (data->trace != NULL) {
  60       FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
  61       FREE_C_HEAP_OBJ(data->trace);
  62     }
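The references field above is a manual reference count: the same trace can sit in the live list and in one or both garbage lists at once, and must be freed exactly once, by whichever list releases it last. A minimal standalone sketch of that idiom, with hypothetical names and plain malloc/free standing in for the HotSpot allocation macros:

    #include <cstdlib>

    struct Frame { void *method; int bci; };

    // Hypothetical stand-in for StackTraceData: a trace shared by several
    // lists, freed only when the last holder releases it.
    struct SharedTrace {
      Frame *frames;
      int frame_count;
      int references;   // how many lists currently point at this trace
    };

    // A list calls this when it drops its pointer to the trace.
    void release_trace(SharedTrace *t) {
      if (--t->references == 0) {
        std::free(t->frames);   // payload first, then the holder itself
        std::free(t);
      }
    }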


 245     _stats.sample_rate_count++;
 246   }
 247 
 248   bool initialized() { return _initialized; }
 249   volatile bool *initialized_address() { return &_initialized; }
 250 
 251  private:
 252   // The traces currently sampled.
 253   GrowableArray<StackTraceData> *_allocated_traces;
 254 
 255   // Recent garbage traces.
 256   MostRecentGarbageTraces *_recent_garbage_traces;
 257 
 258   // Frequent garbage traces.
 259   FrequentGarbageTraces *_frequent_garbage_traces;
 260 
 261   // Heap Sampling statistics.
 262   jvmtiHeapSamplingStats _stats;
 263 
 264   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 265   int _max_storage;
 266 
 267   static StackTraceStorage* internal_storage;
 268   volatile bool _initialized;
 269 
 270   // Support functions and classes for copying data to the external
 271   // world.
 272   class StackTraceDataCopier {
 273    public:
 274     virtual int size() const = 0;
 275     virtual const StackTraceData *get(uint32_t i) const = 0;
 276   };
 277 
 278   class LiveStackTraceDataCopier : public StackTraceDataCopier {
 279    public:
 280     LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
 281         _data(data) {}
 282     int size() const { return _data ? _data->length() : 0; }
 283     const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }
 284 
 285    private:


 311   void store_garbage_trace(const StackTraceData &trace);
 312 
 313   void free_garbage();
 314 };
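internal_storage backs a process-wide singleton; call sites further down this page go through StackTraceStorage::storage(). The accessor itself is not shown here, so the following is only a sketch of the usual shape of the pattern, with lazy construction assumed:

    #include <cstddef>

    // Generic shape of the pattern: one process-wide instance behind a static
    // pointer, handed out by a static accessor. Creation is assumed to be
    // serialized by the callers; this page does not show the real locking.
    class Storage {
     public:
      static Storage *storage() {
        if (_instance == NULL) {
          _instance = new Storage();
        }
        return _instance;
      }
     private:
      static Storage *_instance;
    };

    Storage *Storage::_instance = NULL;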
 315 
 316 StackTraceStorage* StackTraceStorage::internal_storage;
 317 
 318 // Statics for Sampler
 319 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 320 bool HeapMonitoring::_enabled;
 321 AlwaysTrueClosure HeapMonitoring::_always_true;
 322 jint HeapMonitoring::_monitoring_rate;
 323 
 324 // Cheap random number generator
 325 uint64_t HeapMonitoring::_rnd;
 326 
 327 StackTraceStorage::StackTraceStorage() :
 328   _allocated_traces(NULL),
 329   _recent_garbage_traces(NULL),
 330   _frequent_garbage_traces(NULL),
 331   _max_storage(0),
 332   _initialized(false) {
 333     memset(&_stats, 0, sizeof(_stats));
 334 }
 335 
 336 void StackTraceStorage::free_garbage() {
 337   StackTraceData **recent_garbage = NULL;
 338   uint32_t recent_size = 0;
 339 
 340   StackTraceData **frequent_garbage = NULL;
 341   uint32_t frequent_size = 0;
 342 
 343   if (_recent_garbage_traces != NULL) {
 344     recent_garbage = _recent_garbage_traces->get_traces();
 345     recent_size = _recent_garbage_traces->size();
 346   }
 347 
 348   if (_frequent_garbage_traces != NULL) {
 349     frequent_garbage = _frequent_garbage_traces->get_traces();
 350     frequent_size = _frequent_garbage_traces->size();
 351   }


 368     StackTraceData *trace = frequent_garbage[i];
 369     if (trace != NULL) {
 370       trace->references--;
 371 
 372       if (trace->references == 0) {
 373         StackTraceData::free_data(trace);
 374       }
 375     }
 376   }
 377 }
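Both garbage arrays are walked the same way: each slot gives up one reference, and the trace is freed when the count reaches zero. Continuing the earlier sketch, the walk reduces to:

    // Drop one reference per non-null slot; release_trace (from the sketch
    // above) frees the trace once its count reaches zero.
    void release_all(SharedTrace **traces, unsigned size) {
      for (unsigned i = 0; i < size; i++) {
        if (traces[i] != NULL) {
          release_trace(traces[i]);
        }
      }
    }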
 378 
 379 StackTraceStorage::~StackTraceStorage() {
 380   delete _allocated_traces;
 381 
 382   free_garbage();
 383   delete _recent_garbage_traces;
 384   delete _frequent_garbage_traces;
 385   _initialized = false;
 386 }
 387 
 388 void StackTraceStorage::initialize_storage(int max_storage) {
 389   // In case multiple threads blocked on the lock and then got through one by one.
 390   if (_initialized) {
 391     return;
 392   }
 393 
 394   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 395       GrowableArray<StackTraceData>(128, true);
 396 
 397   _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
 398   _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);
 399 
 400   _max_storage = max_storage;
 401   _initialized = true;
 402 }
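The early _initialized return assumes callers already serialize on a lock and may arrive one after another; the flag itself is not a synchronization mechanism. A minimal sketch of the intended call pattern, assuming a caller-side mutex (the lock is hypothetical; this page does not show where the real one lives):

    #include <mutex>

    static std::mutex init_lock;   // hypothetical; HotSpot would use its own Mutex

    // Threads serialize on the lock; the _initialized flag turns every call
    // after the first into a no-op, exactly as in initialize_storage above.
    void initialize_storage_once(StackTraceStorage *storage, int max_storage) {
      std::lock_guard<std::mutex> guard(init_lock);
      storage->initialize_storage(max_storage);
    }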
 403 
 404 void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
 405   StackTraceData new_data(trace, o);
 406   _stats.sample_count++;
 407   _stats.stack_depth_accumulation += trace->frame_count;
 408   _allocated_traces->append(new_data);
 409 }
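sample_count and stack_depth_accumulation together give the average sampled stack depth, which is presumably how these two counters are meant to be consumed:

    // Average stack depth across all samples recorded by add_trace above,
    // guarding against a zero sample count.
    double average_stack_depth(const jvmtiHeapSamplingStats &stats) {
      if (stats.sample_count == 0) {
        return 0.0;
      }
      return (double) stats.stack_depth_accumulation / (double) stats.sample_count;
    }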
 410 
 411 size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
 412                                        OopClosure *f) {
 413   size_t count = 0;
 414   if (is_initialized()) {
 415     int len = _allocated_traces->length();
 416 
 417     // Compact the oop traces.  Moves the live oops to the beginning of the
 418     // growable array, potentially overwriting the dead ones.
 419     int curr_pos = 0;
 420     for (int i = 0; i < len; i++) {


 439     // Zero out remaining array elements.  Even though the call to trunc_to
 440     // below truncates these values, zeroing them out is good practice.
 441     StackTraceData zero_trace;
 442     for (int i = curr_pos; i < len; i++) {
 443       _allocated_traces->at_put(i, zero_trace);
 444     }
 445 
 446     // Set the array's length to the number of live elements.
 447     _allocated_traces->trunc_to(curr_pos);
 448   }
 449 
 450   return count;
 451 }
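The loop body is elided on this page, but the surrounding comments describe a standard predicate-driven compaction: live entries slide toward the front, curr_pos tracks the insertion point, and the dead tail is zeroed and truncated. A generic standalone sketch of that shape, using a hypothetical is_alive predicate rather than the HotSpot closure machinery:

    // Compact data[0..len) in place, keeping entries that satisfy is_alive().
    // Returns the new logical length; the tail is zeroed as in the code above.
    int compact(SharedTrace **data, int len, bool (*is_alive)(SharedTrace *)) {
      int curr_pos = 0;
      for (int i = 0; i < len; i++) {
        if (data[i] != NULL && is_alive(data[i])) {
          data[curr_pos++] = data[i];   // move the live entry forward
        }
      }
      for (int i = curr_pos; i < len; i++) {
        data[i] = NULL;                 // zero out the now-dead tail
      }
      return curr_pos;
    }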
 452 
 453 bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
 454                                   const StackTraceData *from) {
 455   const jvmtiStackTrace *src = from->trace;
 456   *to = *src;
 457 
 458   to->frames =
 459       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
 460 
 461   if (to->frames == NULL) {
 462     return false;
 463   }
 464 
 465   memcpy(to->frames,
 466          src->frames,
 467          sizeof(jvmtiFrameInfo) * MaxStackDepth);
 468   return true;
 469 }
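Note that this revision always allocates and copies MaxStackDepth frames, even when src->frame_count is smaller; if the source frame array was sized to the actual frame count, the memcpy reads past its end. The second revision on this page fixes this by bounding both the allocation and the copy to the source:

    to->frames = NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
    ...
    memcpy(to->frames, src->frames, sizeof(jvmtiFrameInfo) * src->frame_count);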
 470 
 471 // Called by the outside world; returns a copy of the stack traces
 472 // (because we could be replacing them as the user handles them).
 473 // The array is implicitly null-terminated (to make it easier to reclaim).
 474 void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
 475   LiveStackTraceDataCopier copier(_allocated_traces);
 476   copy_stack_traces(copier, traces);
 477 }
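Because the caller receives a deep copy, it owns the snapshot and must release it when done. A sketch of agent-side consumption, using only the fields visible on this page (trace_count, stack_traces, frame_count, frames); the fetch and release entry points are this patch's JVMTI extensions and are not shown here:

    #include <stdio.h>
    // Assumes the jvmtiStackTraces / jvmtiStackTrace definitions from this
    // patch's JVMTI extension header.

    void dump_traces(const jvmtiStackTraces *traces) {
      for (jint i = 0; i < traces->trace_count; i++) {
        const jvmtiStackTrace *t = traces->stack_traces + i;
        printf("trace %d: %d frames\n", (int) i, (int) t->frame_count);
        // t->frames[0 .. t->frame_count) holds the jvmtiFrameInfo entries;
        // the snapshot is the caller's to release.
      }
    }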
 478 
 479 // See comment on get_all_stack_traces
 480 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
 481   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
 482                                      _recent_garbage_traces->size());
 483   copy_stack_traces(copier, traces);
 484 }
 485 
 486 // See comment on get_all_stack_traces
 487 void StackTraceStorage::get_frequent_garbage_stack_traces(


 576   jvmtiStackTrace *stack_traces = traces->stack_traces;
 577 
 578   for (jint i = 0; i < trace_count; i++) {
 579     jvmtiStackTrace *current_trace = stack_traces + i;
 580     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
 581   }
 582 
 583   FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
 584   traces->trace_count = 0;
 585   traces->stack_traces = NULL;
 586 }
 587 
 588 // Invoked by the GC to clean up old stack traces and remove old arrays
 589 // of instrumentation that are still lying around.
 590 size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
 591                                     OopClosure *f) {
 592   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 593   return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
 594 }
 595 
 596 void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {

 597   // Ignore if already enabled.
 598   if (_enabled) {
 599     return;
 600   }
 601 
 602   _monitoring_rate = monitoring_rate;
 603 
 604   // Initialize and reset.
 605   StackTraceStorage::initialize_stack_trace_storage(max_storage);
 606 
 607   // Populate the lookup table for fast_log2.
 608   // This approximates the log2 curve with a step function.
 609   // Steps have height equal to log2 of the mid-point of the step.
 610   for (int i = 0; i < (1 << FastLogNumBits); i++) {
 611     double half_way = static_cast<double>(i + 0.5);
 612     _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
 613   }
 614 
 615   JavaThread *t = static_cast<JavaThread *>(Thread::current());
 616   _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
 617   if (_rnd == 0) {
 618     _rnd = 1;
 619   }
 620   _enabled = true;
 621 }
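The table approximates log2(x) for x in [1, 2) with 2^FastLogNumBits steps. The matching lookup, fast_log2, is not shown on this page; a hedged standalone sketch of the usual construction splits the IEEE-754 double into exponent and mantissa and adds the table entry selected by the mantissa's top bits (the value of FastLogNumBits here is an assumption):

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static const int FastLogNumBits = 10;          // assumed; not shown on this page
    static double log_table[1 << FastLogNumBits];

    void init_log_table() {
      // Same construction as the loop above: each step's height is the log2
      // of its mid-point.
      for (int i = 0; i < (1 << FastLogNumBits); i++) {
        double half_way = i + 0.5;
        log_table[i] = log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0);
      }
    }

    // Approximate log2(x) for normal positive x: exponent from the IEEE-754
    // bits, fraction from the step table indexed by the top mantissa bits.
    double fast_log2(double x) {
      uint64_t bits;
      memcpy(&bits, &x, sizeof(bits));
      int exponent = (int) ((bits >> 52) & 0x7ff) - 1023;
      int index = (int) ((bits >> (52 - FastLogNumBits)) & ((1 << FastLogNumBits) - 1));
      return exponent + log_table[index];
    }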
 622 
 623 void HeapMonitoring::stop_profiling() {
 624   _enabled = false;
 625 }




  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "runtime/heapMonitoring.hpp"
  30 #include "runtime/vframe.hpp"
  31 
  32 const int MaxStackDepth = 1024;
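Relative to the first revision above, the depth cap rises from 64 to 1024 frames and the static qualifier is dropped; with the deep-copy fix below, the per-trace cost now follows the actual frame_count rather than this cap.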










  33 
  34 // Internal data structure representing traces.
  35 struct StackTraceData : CHeapObj<mtInternal> {
  36   jvmtiStackTrace *trace;
  37   oop obj;
  38   int references;
  39 
  40   StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {}
  41 
  42   StackTraceData() : trace(NULL), obj(NULL), references(0) {}
  43 
  44   // StackTraceData instances are shared between various lists, so freeing
  45   // is handled by hand instead of in the destructor: there are cases where
  46   // the struct lives on the stack while holding heap data that must not be
  47   // freed.
  48   static void free_data(StackTraceData *data) {
  49     if (data->trace != NULL) {
  50       FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
  51       FREE_C_HEAP_OBJ(data->trace);
  52     }


 235     _stats.sample_rate_count++;
 236   }
 237 
 238   bool initialized() { return _initialized; }
 239   volatile bool *initialized_address() { return &_initialized; }
 240 
 241  private:
 242   // The traces currently sampled.
 243   GrowableArray<StackTraceData> *_allocated_traces;
 244 
 245   // Recent garbage traces.
 246   MostRecentGarbageTraces *_recent_garbage_traces;
 247 
 248   // Frequent garbage traces.
 249   FrequentGarbageTraces *_frequent_garbage_traces;
 250 
 251   // Heap Sampling statistics.
 252   jvmtiHeapSamplingStats _stats;
 253 
 254   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 255   int _max_gc_storage;
 256 
 257   static StackTraceStorage* internal_storage;
 258   volatile bool _initialized;
 259 
 260   // Support functions and classes for copying data to the external
 261   // world.
 262   class StackTraceDataCopier {
 263    public:
 264     virtual int size() const = 0;
 265     virtual const StackTraceData *get(uint32_t i) const = 0;
 266   };
 267 
 268   class LiveStackTraceDataCopier : public StackTraceDataCopier {
 269    public:
 270     LiveStackTraceDataCopier(GrowableArray<StackTraceData> *data) :
 271         _data(data) {}
 272     int size() const { return _data ? _data->length() : 0; }
 273     const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); }
 274 
 275    private:


 301   void store_garbage_trace(const StackTraceData &trace);
 302 
 303   void free_garbage();
 304 };
 305 
 306 StackTraceStorage* StackTraceStorage::internal_storage;
 307 
 308 // Statics for Sampler
 309 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 310 bool HeapMonitoring::_enabled;
 311 AlwaysTrueClosure HeapMonitoring::_always_true;
 312 jint HeapMonitoring::_monitoring_rate;
 313 
 314 // Cheap random number generator
 315 uint64_t HeapMonitoring::_rnd;
 316 
 317 StackTraceStorage::StackTraceStorage() :
 318   _allocated_traces(NULL),
 319   _recent_garbage_traces(NULL),
 320   _frequent_garbage_traces(NULL),
 321   _max_gc_storage(0),
 322   _initialized(false) {
 323     memset(&_stats, 0, sizeof(_stats));
 324 }
 325 
 326 void StackTraceStorage::free_garbage() {
 327   StackTraceData **recent_garbage = NULL;
 328   uint32_t recent_size = 0;
 329 
 330   StackTraceData **frequent_garbage = NULL;
 331   uint32_t frequent_size = 0;
 332 
 333   if (_recent_garbage_traces != NULL) {
 334     recent_garbage = _recent_garbage_traces->get_traces();
 335     recent_size = _recent_garbage_traces->size();
 336   }
 337 
 338   if (_frequent_garbage_traces != NULL) {
 339     frequent_garbage = _frequent_garbage_traces->get_traces();
 340     frequent_size = _frequent_garbage_traces->size();
 341   }


 358     StackTraceData *trace = frequent_garbage[i];
 359     if (trace != NULL) {
 360       trace->references--;
 361 
 362       if (trace->references == 0) {
 363         StackTraceData::free_data(trace);
 364       }
 365     }
 366   }
 367 }
 368 
 369 StackTraceStorage::~StackTraceStorage() {
 370   delete _allocated_traces;
 371 
 372   free_garbage();
 373   delete _recent_garbage_traces;
 374   delete _frequent_garbage_traces;
 375   _initialized = false;
 376 }
 377 
 378 void StackTraceStorage::initialize_storage(int max_gc_storage) {
 379   // In case multiple threads blocked on the lock and then got through one by one.
 380   if (_initialized) {
 381     return;
 382   }
 383 
 384   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 385       GrowableArray<StackTraceData>(128, true);
 386 
 387   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 388   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 389 
 390   _max_gc_storage = max_gc_storage;
 391   _initialized = true;
 392 }
 393 
 394 void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
 395   StackTraceData new_data(trace, o);
 396   _stats.sample_count++;
 397   _stats.stack_depth_accumulation += trace->frame_count;
 398   _allocated_traces->append(new_data);
 399 }
 400 
 401 size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
 402                                        OopClosure *f) {
 403   size_t count = 0;
 404   if (is_initialized()) {
 405     int len = _allocated_traces->length();
 406 
 407     // Compact the oop traces.  Moves the live oops to the beginning of the
 408     // growable array, potentially overwriting the dead ones.
 409     int curr_pos = 0;
 410     for (int i = 0; i < len; i++) {


 429     // Zero out remaining array elements.  Even though the call to trunc_to
 430     // below truncates these values, zeroing them out is good practice.
 431     StackTraceData zero_trace;
 432     for (int i = curr_pos; i < len; i++) {
 433       _allocated_traces->at_put(i, zero_trace);
 434     }
 435 
 436     // Set the array's length to the number of live elements.
 437     _allocated_traces->trunc_to(curr_pos);
 438   }
 439 
 440   return count;
 441 }
 442 
 443 bool StackTraceStorage::deep_copy(jvmtiStackTrace *to,
 444                                   const StackTraceData *from) {
 445   const jvmtiStackTrace *src = from->trace;
 446   *to = *src;
 447 
 448   to->frames =
 449       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
 450 
 451   if (to->frames == NULL) {
 452     return false;
 453   }
 454 
 455   memcpy(to->frames,
 456          src->frames,
 457          sizeof(jvmtiFrameInfo) * src->frame_count);
 458   return true;
 459 }
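This is the fix for the over-read flagged in the first revision: the allocation and the memcpy are now both bounded by src->frame_count instead of MaxStackDepth.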
 460 
 461 // Called by the outside world; returns a copy of the stack traces
 462 // (because we could be replacing them as the user handles them).
 463 // The array is implicitly null-terminated (to make it easier to reclaim).
 464 void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) {
 465   LiveStackTraceDataCopier copier(_allocated_traces);
 466   copy_stack_traces(copier, traces);
 467 }
 468 
 469 // See comment on get_all_stack_traces
 470 void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) {
 471   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
 472                                      _recent_garbage_traces->size());
 473   copy_stack_traces(copier, traces);
 474 }
 475 
 476 // See comment on get_all_stack_traces
 477 void StackTraceStorage::get_frequent_garbage_stack_traces(


 566   jvmtiStackTrace *stack_traces = traces->stack_traces;
 567 
 568   for (jint i = 0; i < trace_count; i++) {
 569     jvmtiStackTrace *current_trace = stack_traces + i;
 570     FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
 571   }
 572 
 573   FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
 574   traces->trace_count = 0;
 575   traces->stack_traces = NULL;
 576 }
 577 
 578 // Invoked by the GC to clean up old stack traces and remove old arrays
 579 // of instrumentation that are still lying around.
 580 size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
 581                                     OopClosure *f) {
 582   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 583   return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
 584 }
 585 
 586 void HeapMonitoring::initialize_profiling(jint monitoring_rate,
 587                                           jint max_gc_storage) {
 588   // Ignore if already enabled.
 589   if (_enabled) {
 590     return;
 591   }
 592 
 593   _monitoring_rate = monitoring_rate;
 594 
 595   // Initialize and reset.
 596   StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);
 597 
 598   // Populate the lookup table for fast_log2.
 599   // This approximates the log2 curve with a step function.
 600   // Steps have height equal to log2 of the mid-point of the step.
 601   for (int i = 0; i < (1 << FastLogNumBits); i++) {
 602     double half_way = static_cast<double>(i + 0.5);
 603     _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
 604   }
 605 
 606   JavaThread *t = static_cast<JavaThread *>(Thread::current());
 607   _rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
 608   if (_rnd == 0) {
 609     _rnd = 1;
 610   }
 611   _enabled = true;
 612 }
 613 
 614 void HeapMonitoring::stop_profiling() {
 615   _enabled = false;
 616 }

