
src/hotspot/share/runtime/heapMonitoring.cpp

rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48555 : [mq]: heap16
rev 48556 : [mq]: heap17
rev 48557 : [mq]: heap17
rev 48558 : [mq]: heap19
rev 48559 : [mq]: heap20


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "runtime/heapMonitoring.hpp"

  30 #include "runtime/vframe.hpp"
  31 
  32 static const int MaxStackDepth = 1024;
  33 
  34 // Internal data structure representing traces, used once the object has been GC'd.
  35 struct StackTraceData : CHeapObj<mtInternal> {
  36   jvmtiStackTrace* trace;
  37   int references;
  38 
  39   StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {}
  40 
  41   StackTraceData() : trace(NULL), references(0) {}
  42 
  43   // StackTraceData instances are shared between various lists, so freeing
  44   // is handled explicitly here rather than in the destructor. There are
  45   // cases where the struct is on the stack but holds heap data that must
  46   // not be freed.
  47   static void free_data(StackTraceData* data) {
  48     if (data->trace != NULL) {
  49       FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
  50       FREE_C_HEAP_OBJ(data->trace);
  51     }
  52     delete data;
  53   }
  54 };
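
A minimal standalone sketch of this manual reference-counting scheme, using plain new/delete in place of HotSpot's CHeapObj allocation macros (Frame and Trace are hypothetical stand-ins for jvmtiFrameInfo and jvmtiStackTrace):

    struct Frame { void* method; int bci; };          // stand-in for jvmtiFrameInfo
    struct Trace { Frame* frames; int frame_count; }; // stand-in for jvmtiStackTrace

    struct TraceData {
      Trace* trace;
      int references;  // how many garbage lists currently hold this entry

      explicit TraceData(Trace* t) : trace(t), references(0) {}

      // Freed explicitly, not in a destructor: stack copies of this struct
      // may still alias heap data owned by one of the lists.
      static void free_data(TraceData* data) {
        if (data->trace != nullptr) {
          delete[] data->trace->frames;
          delete data->trace;
        }
        delete data;
      }
    };

The reference count is what lets the same trace sit in both the recent and frequent garbage buffers below; it is freed only when the last buffer releases it.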
  55 
  56 // Internal data structure representing traces with the oop, used while the
  57 // object is live. Since this structure just hands the trace off to the GC
  58 // lists, it does not handle any freeing.
  59 struct StackTraceDataWithOop : public StackTraceData {
  60   oop obj;
  61 
  62   StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t), obj(o) {}
  63 
  64   StackTraceDataWithOop() : StackTraceData(), obj(NULL) {}
  65 };
  66 
  67 // Fixed size buffer for holding garbage traces.
  68 class GarbageTracesBuffer : public CHeapObj<mtInternal> {
  69  public:
  70   GarbageTracesBuffer(uint32_t size) : _size(size) {
  71     _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
  72                                        size,
  73                                        mtInternal);
  74     memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  75   }
  76 
  77   virtual ~GarbageTracesBuffer() {
  78     FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  79   }
  80 
  81   StackTraceData** get_traces() const {
  82     return _garbage_traces;
  83   }
  84 
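
The buffer itself is just a zero-initialized array of pointers, sized once at construction. A hedged sketch with new[]/delete[] standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY, reusing TraceData from the sketch above:

    #include <cstdint>
    #include <cstring>

    class TracesBuffer {
     public:
      explicit TracesBuffer(uint32_t size) : _size(size) {
        _traces = new TraceData*[size];
        memset(_traces, 0, sizeof(TraceData*) * size);  // all slots start empty
      }
      virtual ~TracesBuffer() { delete[] _traces; }

      TraceData** get_traces() const { return _traces; }
      uint32_t size() const { return _size; }

     private:
      TraceData** _traces;
      const uint32_t _size;
    };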


 225   void initialize(int max_storage) {
 226     MutexLocker mu(HeapMonitorStorage_lock);
 227     allocate_storage(max_storage);
 228     memset(&_stats, 0, sizeof(_stats));
 229   }
 230 
 231   void stop() {
 232     MutexLocker mu(HeapMonitorStorage_lock);
 233     free_storage();
 234   }
 235 
 236   const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
 237     return _stats;
 238   }
 239 
 240   void accumulate_sample_rate(size_t rate) {
 241     _stats.sample_rate_accumulation += rate;
 242     _stats.sample_rate_count++;
 243   }
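
Nothing here computes an average; the accumulation and the count are kept separate so a consumer can derive the mean later. A hypothetical helper (not part of this patch) would be:

    // Hypothetical: mean sampling rate from the two stats fields above.
    static double average_sample_rate(size_t accumulation, size_t count) {
      return count == 0 ? 0.0 : (double)accumulation / (double)count;
    }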
 244 
 245   bool initialized() { return _initialized; }
 246 
 247  private:
 248   // The traces currently sampled.
 249   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 250 
 251   // The traces as they were at the last full GC.
 252   GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;
 253 
 254   // Recent garbage traces.
 255   MostRecentGarbageTraces* _recent_garbage_traces;
 256 
 257   // Frequent garbage traces.
 258   FrequentGarbageTraces* _frequent_garbage_traces;
 259 
 260   // Heap Sampling statistics.
 261   jvmtiHeapSamplingStats _stats;
 262 
 263   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 264   int _max_gc_storage;
 265 
 266   static StackTraceStorage* internal_storage;
 267   volatile bool _initialized;
 268 
 269   // Support functions and classes for copying data to the external
 270   // world.
 271   class StackTraceDataCopier {
 272    public:
 273     virtual int size() const = 0;
 274     virtual const StackTraceData* get(uint32_t i) const = 0;
 275   };
 276 
 277   class LiveStackTraceDataCopier : public StackTraceDataCopier {
 278    public:
 279     LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
 280         _data(data) {}
 281     int size() const { return _data ? _data->length() : 0; }
 282     const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }
 283 
 284    private:
 285     GrowableArray<StackTraceDataWithOop>* _data;
 286   };
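
The copier interface hides the container behind size()/get(), so one copy-out loop can serve both live traces and garbage traces. The garbage-side implementation is elided from this hunk; a plausible sketch against the stand-in types above (hypothetical, not the patch's actual class) is:

    #include <cstdint>

    class TraceDataCopier {
     public:
      virtual ~TraceDataCopier() {}
      virtual int size() const = 0;
      virtual const TraceData* get(uint32_t i) const = 0;
    };

    // Adapts a fixed-size pointer buffer (e.g. TracesBuffer above) to the
    // same contract the live copier satisfies for GrowableArray.
    class BufferedTraceDataCopier : public TraceDataCopier {
     public:
      BufferedTraceDataCopier(TraceData** data, int size)
          : _data(data), _size(size) {}
      int size() const { return _size; }
      const TraceData* get(uint32_t i) const { return _data[i]; }

     private:
      TraceData** _data;
      int _size;
    };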
 287 


 316 StackTraceStorage* StackTraceStorage::internal_storage;
 317 
 318 // Statics for Sampler
 319 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 320 bool HeapMonitoring::_enabled;
 321 jint HeapMonitoring::_monitoring_rate;
 322 
 323 // Cheap random number generator
 324 uint64_t HeapMonitoring::_rnd;
 325 
 326 StackTraceStorage::StackTraceStorage() {
 327   reset();
 328 }
 329 
 330 void StackTraceStorage::reset() {
 331   _allocated_traces = NULL;
 332   _traces_on_last_full_gc = NULL;
 333   _recent_garbage_traces = NULL;
 334   _frequent_garbage_traces = NULL;
 335   _max_gc_storage = 0;
 336   _initialized = false;
 337 }
 338 
 339 void StackTraceStorage::free_garbage() {
 340   StackTraceData** recent_garbage = NULL;
 341   uint32_t recent_size = 0;
 342 
 343   StackTraceData** frequent_garbage = NULL;
 344   uint32_t frequent_size = 0;
 345 
 346   if (_recent_garbage_traces != NULL) {
 347     recent_garbage = _recent_garbage_traces->get_traces();
 348     recent_size = _recent_garbage_traces->size();
 349   }
 350 
 351   if (_frequent_garbage_traces != NULL) {
 352     frequent_garbage = _frequent_garbage_traces->get_traces();
 353     frequent_size = _frequent_garbage_traces->size();
 354   }
 355 
 356   // Simple solution, since this happens at exit.


 363       if (trace->references == 0) {
 364         StackTraceData::free_data(trace);
 365       }
 366     }
 367   }
 368 
 369   // Then go through the frequent traces and free those whose reference count drops to zero.
 370   for (uint32_t i = 0; i < frequent_size; i++) {
 371     StackTraceData* trace = frequent_garbage[i];
 372     if (trace != NULL) {
 373       trace->references--;
 374 
 375       if (trace->references == 0) {
 376         StackTraceData::free_data(trace);
 377       }
 378     }
 379   }
 380 }
 381 
 382 void StackTraceStorage::free_storage() {
 383   if (!_initialized) {
 384     return;
 385   }
 386 
 387   delete _allocated_traces;
 388   delete _traces_on_last_full_gc;
 389 
 390   free_garbage();
 391   delete _recent_garbage_traces;
 392   delete _frequent_garbage_traces;
 393 
 394   reset();
 395 }
 396 
 397 StackTraceStorage::~StackTraceStorage() {
 398   free_storage();
 399 }
 400 
 401 void StackTraceStorage::allocate_storage(int max_gc_storage) {
 402   // In case multiple threads were blocked on the lock and then got through one by one.
 403   if (_initialized) {
 404     return;
 405   }
 406 
 407   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 408       GrowableArray<StackTraceDataWithOop>(128, true);
 409   _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
 410       GrowableArray<StackTraceDataWithOop>(128, true);
 411 
 412   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 413   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 414 
 415   _max_gc_storage = max_gc_storage;
 416   _initialized = true;
 417 }
 418 
 419 void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
 420   MutexLocker mu(HeapMonitorStorage_lock);
 421   // Last-minute check on initialization here, in case a stop() deleted
 422   //   the data between object_alloc_do_sample's initialization check
 423   //   and now.
 424   if (_initialized) {
 425     StackTraceDataWithOop new_data(trace, o);
 426     _stats.sample_count++;
 427     _stats.stack_depth_accumulation += trace->frame_count;
 428     _allocated_traces->append(new_data);
 429   }
 430 }
 431 
 432 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
 433                                      OopClosure* f) {
 434   size_t count = 0;
 435   if (_initialized) {
 436     int len = _allocated_traces->length();
 437 
 438     _traces_on_last_full_gc->clear();
 439 
 440     // Compact the oop traces.  Moves the live oops to the beginning of the
 441     // growable array, potentially overwriting the dead ones.
 442     int curr_pos = 0;
 443     for (int i = 0; i < len; i++) {
 444       StackTraceDataWithOop &trace = _allocated_traces->at(i);
 445       oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(
 446           &trace.obj);
 447       if (is_alive->do_object_b(value)) {
 448         // Update the oop to point to the new object if it is still alive.
 449         f->do_oop(&(trace.obj));
 450 
 451         // Copy the old trace, if it is still live.
 452         _allocated_traces->at_put(curr_pos++, trace);
 453 
 454         // Store the live trace in a cache, to be served up on /heapz.
 455         _traces_on_last_full_gc->append(trace);
 456 
 457         count++;
 458       } else {
 459         // If the old trace is no longer live, add it to the list of
 460         // recently collected garbage.
 461         store_garbage_trace(trace);
 462       }
 463     }
 464 
 465     // Zero out remaining array elements.  Even though the call to trunc_to
 466     // below truncates these values, zeroing them out is good practice.
 467     StackTraceDataWithOop zero_trace;
 468     for (int i = curr_pos; i < len; i++) {
 469       _allocated_traces->at_put(i, zero_trace);
 470     }
 471 
 472     // Set the array's length to the number of live elements.
 473     _allocated_traces->trunc_to(curr_pos);
 474   }
 475 
 476   log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
 477 }
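
The loop above is the classic in-place compaction idiom: walk the array once, copy survivors toward the front, then truncate. The same shape in portable C++ (a sketch, with is_live standing in for the GC's liveness closure):

    #include <cstddef>
    #include <vector>

    template <typename T, typename Pred>
    void compact_live(std::vector<T>& entries, Pred is_live) {
      std::size_t curr_pos = 0;
      for (std::size_t i = 0; i < entries.size(); i++) {
        if (is_live(entries[i])) {
          entries[curr_pos++] = entries[i];  // may overwrite a dead slot
        }
      }
      entries.resize(curr_pos);  // analog of trunc_to(curr_pos)
    }

Because curr_pos never passes i, each survivor is copied at most once and dead entries are simply overwritten.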
 478 
 479 bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
 480                                   const StackTraceData* from) {
 481   const jvmtiStackTrace* src = from->trace;
 482   *to = *src;
 483 
 484   to->frames =
 485       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
 486 
 487   if (to->frames == NULL) {
 488     return false;
 489   }
 490 
 491   memcpy(to->frames,
 492          src->frames,
 493          sizeof(jvmtiFrameInfo) * src->frame_count);




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "memory/universe.hpp"
  29 #include "runtime/heapMonitoring.hpp"
  30 #include "runtime/orderAccess.inline.hpp"
  31 #include "runtime/vframe.hpp"
  32 
  33 static const int MaxStackDepth = 1024;
  34 
  35 // Internal data structure representing traces, used once the object has been GC'd.
  36 struct StackTraceData : CHeapObj<mtInternal> {
  37   jvmtiStackTrace* trace;
  38   int references;
  39 
  40   StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {}
  41 
  42   StackTraceData() : trace(NULL), references(0) {}
  43 
  44   // StackTraceData instances are shared between various lists, so freeing
  45   // is handled explicitly here rather than in the destructor. There are
  46   // cases where the struct is on the stack but holds heap data that must
  47   // not be freed.
  48   static void free_data(StackTraceData* data) {
  49     if (data->trace != NULL) {
  50       FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames);
  51       FREE_C_HEAP_OBJ(data->trace);
  52     }
  53     delete data;
  54   }
  55 };
  56 
  57 // Internal data structure representing traces with the oop, used while the
  58 // object is live. Since this structure just hands the trace off to the GC
  59 // lists, it does not handle any freeing.
  60 struct StackTraceDataWithOop : public StackTraceData {
  61   oop obj;
  62 
  63   StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
  64     store_oop(o);
  65   }
  66 
  67   StackTraceDataWithOop() : StackTraceData(), obj(NULL) {}
  68 
  69   oop load_oop() {
  70     return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&obj);
  71   }
  72 
  73   void store_oop(oop value) {
  74     RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&obj, value);
  75   }
  76 
  77   void clear_oop() {
  78     store_oop(reinterpret_cast<oop>(NULL));
  79   }
  80 };
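
RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE> is HotSpot's GC-barrier Access API; the point of these helpers is that every read and write of obj goes through barrier-aware code instead of touching the field directly. As a structural analogy only (std::atomic gives memory ordering, not GC barriers), the wrapper pattern looks like:

    #include <atomic>

    // Analogy, not an equivalent: the structural point is funneling all
    // access to the wrapped pointer through load/store helpers, as the
    // oop field does above.
    template <typename T>
    class GuardedRef {
     public:
      GuardedRef() : _ref(nullptr) {}
      explicit GuardedRef(T* value) : _ref(value) {}

      T* load() { return _ref.load(std::memory_order_acquire); }
      void store(T* value) { _ref.store(value, std::memory_order_release); }
      void clear() { store(nullptr); }

     private:
      std::atomic<T*> _ref;
    };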
  81 
  82 // Fixed size buffer for holding garbage traces.
  83 class GarbageTracesBuffer : public CHeapObj<mtInternal> {
  84  public:
  85   GarbageTracesBuffer(uint32_t size) : _size(size) {
  86     _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*,
  87                                        size,
  88                                        mtInternal);
  89     memset(_garbage_traces, 0, sizeof(StackTraceData*) * size);
  90   }
  91 
  92   virtual ~GarbageTracesBuffer() {
  93     FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces);
  94   }
  95 
  96   StackTraceData** get_traces() const {
  97     return _garbage_traces;
  98   }
  99 


 240   void initialize(int max_storage) {
 241     MutexLocker mu(HeapMonitorStorage_lock);
 242     allocate_storage(max_storage);
 243     memset(&_stats, 0, sizeof(_stats));
 244   }
 245 
 246   void stop() {
 247     MutexLocker mu(HeapMonitorStorage_lock);
 248     free_storage();
 249   }
 250 
 251   const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
 252     return _stats;
 253   }
 254 
 255   void accumulate_sample_rate(size_t rate) {
 256     _stats.sample_rate_accumulation += rate;
 257     _stats.sample_rate_count++;
 258   }
 259 
 260   bool initialized() {
 261     return OrderAccess::load_acquire(&_initialized) != 0;
 262   }
 263 
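
The change from a plain volatile bool to load_acquire/release_store pairs the flag with the storage it guards: a reader that sees _initialized != 0 is guaranteed to also see the fully constructed storage. In portable C++ the same protocol would read as follows (a sketch, with std::atomic and std::mutex standing in for OrderAccess and MutexLocker):

    #include <atomic>
    #include <mutex>

    class Storage {
     public:
      // Lock-free fast path: acquire pairs with the release stores below.
      bool initialized() const {
        return _initialized.load(std::memory_order_acquire);
      }

      void initialize() {
        std::lock_guard<std::mutex> lock(_mutex);
        if (initialized()) return;  // racing initializers arrive one by one
        allocate_storage();         // must be complete before the flag flips
        _initialized.store(true, std::memory_order_release);
      }

      void stop() {
        std::lock_guard<std::mutex> lock(_mutex);
        if (!initialized()) return;
        free_storage();
        _initialized.store(false, std::memory_order_release);
      }

     private:
      void allocate_storage() { /* set up guarded state */ }
      void free_storage()     { /* tear down guarded state */ }

      std::mutex _mutex;
      std::atomic<bool> _initialized{false};
    };

Writers still serialize on the lock; the acquire load serves callers such as object_alloc_do_sample that check the flag before taking it.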
 264  private:
 265   // The traces currently sampled.
 266   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 267 
 268   // The traces as they were at the last full GC.
 269   GrowableArray<StackTraceDataWithOop>* _traces_on_last_full_gc;
 270 
 271   // Recent garbage traces.
 272   MostRecentGarbageTraces* _recent_garbage_traces;
 273 
 274   // Frequent garbage traces.
 275   FrequentGarbageTraces* _frequent_garbage_traces;
 276 
 277   // Heap Sampling statistics.
 278   jvmtiHeapSamplingStats _stats;
 279 
 280   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
 281   int _max_gc_storage;
 282 
 283   static StackTraceStorage* internal_storage;
 284   int _initialized;
 285 
 286   // Support functions and classes for copying data to the external
 287   // world.
 288   class StackTraceDataCopier {
 289    public:
 290     virtual int size() const = 0;
 291     virtual const StackTraceData* get(uint32_t i) const = 0;
 292   };
 293 
 294   class LiveStackTraceDataCopier : public StackTraceDataCopier {
 295    public:
 296     LiveStackTraceDataCopier(GrowableArray<StackTraceDataWithOop>* data) :
 297         _data(data) {}
 298     int size() const { return _data ? _data->length() : 0; }
 299     const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); }
 300 
 301    private:
 302     GrowableArray<StackTraceDataWithOop>* _data;
 303   };
 304 


 333 StackTraceStorage* StackTraceStorage::internal_storage;
 334 
 335 // Statics for Sampler
 336 double HeapMonitoring::_log_table[1 << FastLogNumBits];
 337 bool HeapMonitoring::_enabled;
 338 jint HeapMonitoring::_monitoring_rate;
 339 
 340 // Cheap random number generator
 341 uint64_t HeapMonitoring::_rnd;
 342 
 343 StackTraceStorage::StackTraceStorage() {
 344   reset();
 345 }
 346 
 347 void StackTraceStorage::reset() {
 348   _allocated_traces = NULL;
 349   _traces_on_last_full_gc = NULL;
 350   _recent_garbage_traces = NULL;
 351   _frequent_garbage_traces = NULL;
 352   _max_gc_storage = 0;
 353   OrderAccess::release_store(&_initialized, 0);
 354 }
 355 
 356 void StackTraceStorage::free_garbage() {
 357   StackTraceData** recent_garbage = NULL;
 358   uint32_t recent_size = 0;
 359 
 360   StackTraceData** frequent_garbage = NULL;
 361   uint32_t frequent_size = 0;
 362 
 363   if (_recent_garbage_traces != NULL) {
 364     recent_garbage = _recent_garbage_traces->get_traces();
 365     recent_size = _recent_garbage_traces->size();
 366   }
 367 
 368   if (_frequent_garbage_traces != NULL) {
 369     frequent_garbage = _frequent_garbage_traces->get_traces();
 370     frequent_size = _frequent_garbage_traces->size();
 371   }
 372 
 373   // Simple solution, since this happens at exit.


 380       if (trace->references == 0) {
 381         StackTraceData::free_data(trace);
 382       }
 383     }
 384   }
 385 
 386   // Then go through the frequent traces and free those whose reference count drops to zero.
 387   for (uint32_t i = 0; i < frequent_size; i++) {
 388     StackTraceData* trace = frequent_garbage[i];
 389     if (trace != NULL) {
 390       trace->references--;
 391 
 392       if (trace->references == 0) {
 393         StackTraceData::free_data(trace);
 394       }
 395     }
 396   }
 397 }
 398 
 399 void StackTraceStorage::free_storage() {
 400   if (!initialized()) {
 401     return;
 402   }
 403 
 404   delete _allocated_traces;
 405   delete _traces_on_last_full_gc;
 406 
 407   free_garbage();
 408   delete _recent_garbage_traces;
 409   delete _frequent_garbage_traces;
 410 
 411   reset();
 412 }
 413 
 414 StackTraceStorage::~StackTraceStorage() {
 415   free_storage();
 416 }
 417 
 418 void StackTraceStorage::allocate_storage(int max_gc_storage) {
 419   // In case multiple threads were blocked on the lock and then got through one by one.
 420   if (initialized()) {
 421     return;
 422   }
 423 
 424   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
 425       GrowableArray<StackTraceDataWithOop>(128, true);
 426   _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal)
 427       GrowableArray<StackTraceDataWithOop>(128, true);
 428 
 429   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
 430   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 431 
 432   _max_gc_storage = max_gc_storage;
 433   OrderAccess::release_store(&_initialized, 1);
 434 }
 435 
 436 void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
 437   MutexLocker mu(HeapMonitorStorage_lock);
 438   // Last-minute check on initialization here, in case a stop() deleted
 439   //   the data between object_alloc_do_sample's initialization check
 440   //   and now.
 441   if (initialized()) {
 442     StackTraceDataWithOop new_data(trace, o);
 443     _stats.sample_count++;
 444     _stats.stack_depth_accumulation += trace->frame_count;
 445     _allocated_traces->append(new_data);
 446   }
 447 }
 448 
 449 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
 450                                      OopClosure* f) {
 451   size_t count = 0;
 452   if (initialized()) {
 453     int len = _allocated_traces->length();
 454 
 455     _traces_on_last_full_gc->clear();
 456 
 457     // Compact the oop traces.  Moves the live oops to the beginning of the
 458     // growable array, potentially overwriting the dead ones.

 459     for (int i = 0; i < len; i++) {
 460       StackTraceDataWithOop &trace = _allocated_traces->at(i);
 461       oop value = trace.load_oop();

 462       if (is_alive->do_object_b(value)) {
 463         // Update the oop to point to the new object if it is still alive.
 464         f->do_oop(&(trace.obj));
 465 
 466         // Copy the old trace, if it is still live.
 467         _allocated_traces->at_put(count++, trace);
 468 
 469         // Store the live trace in a cache, to be served up on /heapz.
 470         _traces_on_last_full_gc->append(trace);
 471       } else {
 472         trace.clear_oop();
 473 
 474         // If the old trace is no longer live, add it to the list of
 475         // recently collected garbage.
 476         store_garbage_trace(trace);
 477       }
 478     }
 479 
 480     // Zero out remaining array elements.  Even though the call to trunc_to
 481     // below truncates these values, zeroing them out is good practice.
 482     StackTraceDataWithOop zero_trace;
 483     for (int i = count; i < len; i++) {
 484       _allocated_traces->at_put(i, zero_trace);
 485     }
 486 
 487     // Set the array's length to the number of live elements.
 488     _allocated_traces->trunc_to(count);
 489   }
 490 
 491   log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
 492 }
 493 
 494 bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
 495                                   const StackTraceData* from) {
 496   const jvmtiStackTrace* src = from->trace;
 497   *to = *src;
 498 
 499   to->frames =
 500       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
 501 
 502   if (to->frames == NULL) {
 503     return false;
 504   }
 505 
 506   memcpy(to->frames,
 507          src->frames,
 508          sizeof(jvmtiFrameInfo) * src->frame_count);
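
deep_copy is a shallow struct copy followed by duplication of the frames array. The same idiom standalone, reusing the Trace/Frame stand-ins from the first sketch (new(std::nothrow) keeps a meaningful failure path; how the HotSpot macro behaves on allocation failure depends on its failure strategy):

    #include <cstring>
    #include <new>

    static bool deep_copy_trace(Trace* to, const Trace* from) {
      *to = *from;  // copies frame_count and any other header fields

      to->frames = new (std::nothrow) Frame[from->frame_count];
      if (to->frames == nullptr) {
        return false;  // caller treats the whole copy as failed
      }

      memcpy(to->frames, from->frames, sizeof(Frame) * from->frame_count);
      return true;
    }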