src/hotspot/share/runtime/heapMonitoring.cpp

rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48555 : [mq]: heap16
rev 48556 : [mq]: heap17
rev 48557 : [mq]: heap17
rev 48558 : [mq]: heap19
rev 48559 : [mq]: heap20

@@ -25,10 +25,11 @@
 #include "precompiled.hpp"
 
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "runtime/heapMonitoring.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vframe.hpp"
 
 static const int MaxStackDepth = 1024;
 
 // Internal data structure representing traces, used when the object has been GC'd.

@@ -57,13 +58,27 @@
 // is live. Since this structure just passes the trace to the GC lists, it does
 // not handle any freeing.
 struct StackTraceDataWithOop : public StackTraceData {
   oop obj;
 
-  StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t), obj(o) {}
+  StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
+    store_oop(o);
+  }
 
   StackTraceDataWithOop() : StackTraceData(), obj(NULL) {}
+
+  oop load_oop() {
+    return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&obj);
+  }
+
+  void store_oop(oop value) {
+    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(&obj, value);
+  }
+
+  void clear_oop() {
+    store_oop(reinterpret_cast<oop>(NULL));
+  }
 };
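
Note: the new accessors route every read and write of the sampled oop through the Access API. load_oop uses ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE, so reading the referent does not keep the object alive, and store_oop applies whatever barriers the active GC requires. As a loose standalone analogue (not HotSpot code; std::weak_ptr only approximates phantom semantics, since the shared_ptr returned by lock() does pin the object while it is held):

    #include <memory>

    // A slot holding a reference that does not, by itself, keep its
    // referent alive -- roughly what load_oop()/store_oop()/clear_oop()
    // provide for the sampled object, minus the GC barriers.
    struct SampledSlot {
      std::weak_ptr<int> obj;

      std::shared_ptr<int> load() const { return obj.lock(); }  // empty if collected
      void store(const std::shared_ptr<int>& v) { obj = v; }
      void clear() { obj.reset(); }
    };
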
 
 // Fixed size buffer for holding garbage traces.
 class GarbageTracesBuffer : public CHeapObj<mtInternal> {
  public:

@@ -240,11 +255,13 @@
   void accumulate_sample_rate(size_t rate) {
     _stats.sample_rate_accumulation += rate;
     _stats.sample_rate_count++;
   }
 
-  bool initialized() { return _initialized; }
+  bool initialized() {
+    return OrderAccess::load_acquire(&_initialized) != 0;
+  }
 
  private:
   // The traces currently sampled.
   GrowableArray<StackTraceDataWithOop>* _allocated_traces;
 

@@ -262,11 +279,11 @@
 
   // Maximum amount of storage provided by the JVMTI call initialize_profiling.
   int _max_gc_storage;
 
   static StackTraceStorage* internal_storage;
-  volatile bool _initialized;
+  int _initialized;
 
   // Support functions and classes for copying data to the external
   // world.
   class StackTraceDataCopier {
    public:

@@ -331,11 +348,11 @@
   _allocated_traces = NULL;
   _traces_on_last_full_gc = NULL;
   _recent_garbage_traces = NULL;
   _frequent_garbage_traces = NULL;
   _max_gc_storage = 0;
-  _initialized = false;
+  OrderAccess::release_store(&_initialized, 0);
 }
 
 void StackTraceStorage::free_garbage() {
   StackTraceData** recent_garbage = NULL;
   uint32_t recent_size = 0;

@@ -378,11 +395,11 @@
     }
   }
 }
 
 void StackTraceStorage::free_storage() {
-  if (!_initialized) {
+  if (!initialized()) {
     return;
   }
 
   delete _allocated_traces;
   delete _traces_on_last_full_gc;

@@ -398,11 +415,11 @@
   free_storage();
 }
 
 void StackTraceStorage::allocate_storage(int max_gc_storage) {
  // In case multiple threads were blocked on the lock and then got through one by one.
-  if (_initialized) {
+  if (initialized()) {
     return;
   }
 
   _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
       GrowableArray<StackTraceDataWithOop>(128, true);

@@ -411,68 +428,66 @@
 
   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);
 
   _max_gc_storage = max_gc_storage;
-  _initialized = true;
+  OrderAccess::release_store(&_initialized, 1);
 }
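
The release_store above pairs with the load_acquire in initialized(): a thread that observes _initialized == 1 is guaranteed to also observe the fully constructed trace arrays. A minimal standalone analogue of the same publication pattern (not HotSpot code; names are illustrative):

    #include <atomic>

    // Writer builds the payload completely, then release-stores the flag;
    // a reader that acquire-loads the flag as 1 is guaranteed to also see
    // the payload stores. Same pairing as release_store()/load_acquire().
    struct Published {
      int* payload = nullptr;
      std::atomic<int> initialized{0};

      void allocate() {
        payload = new int[128]();                         // build first
        initialized.store(1, std::memory_order_release);  // then publish
      }

      bool is_initialized() const {
        return initialized.load(std::memory_order_acquire) != 0;
      }
    };
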
 
 void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
   MutexLocker mu(HeapMonitorStorage_lock);
  // Last-minute check on initialization, in case a stop() deleted the
  //   data between object_alloc_do_sample's initialization check and now.
-  if (_initialized) {
+  if (initialized()) {
     StackTraceDataWithOop new_data(trace, o);
     _stats.sample_count++;
     _stats.stack_depth_accumulation += trace->frame_count;
     _allocated_traces->append(new_data);
   }
 }
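
The re-check under HeapMonitorStorage_lock is the classic check/lock/re-check shape: the sampler tests initialized() without the lock on the fast path, and add_trace tests again with the lock held, because a concurrent stop() may have freed the storage in between. A standalone sketch of the shape (not HotSpot code; names are illustrative):

    #include <mutex>

    static std::mutex storage_lock;
    static bool storage_initialized = false;

    void add_sample(int sample) {
      // Fast path already tested the flag without the lock; test again
      // under the lock, since a concurrent stop() may have freed the
      // storage between the two checks.
      std::lock_guard<std::mutex> guard(storage_lock);
      if (!storage_initialized) {
        return;  // storage torn down; drop the sample
      }
      // ... append the sample while still holding the lock ...
    }
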
 
 void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                      OopClosure* f) {
   size_t count = 0;
-  if (_initialized) {
+  if (initialized()) {
     int len = _allocated_traces->length();
 
     _traces_on_last_full_gc->clear();
 
     // Compact the oop traces.  Moves the live oops to the beginning of the
     // growable array, potentially overwriting the dead ones.
-    int curr_pos = 0;
     for (int i = 0; i < len; i++) {
       StackTraceDataWithOop &trace = _allocated_traces->at(i);
-      oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(
-          &trace.obj);
+      oop value = trace.load_oop();
       if (is_alive->do_object_b(value)) {
         // Update the oop to point to the new object if it is still alive.
         f->do_oop(&(trace.obj));
 
         // Copy the old trace, if it is still live.
-        _allocated_traces->at_put(curr_pos++, trace);
+        _allocated_traces->at_put((int)count++, trace);
 
         // Store the live trace in a cache, to be served up on /heapz.
         _traces_on_last_full_gc->append(trace);
-
-        count++;
       } else {
+        trace.clear_oop();
+
         // If the old trace is no longer live, add it to the list of
         // recently collected garbage.
         store_garbage_trace(trace);
       }
     }
 
     // Zero out remaining array elements.  Even though the call to trunc_to
     // below truncates these values, zeroing them out is good practice.
     StackTraceDataWithOop zero_trace;
-    for (int i = curr_pos; i < len; i++) {
+    for (int i = (int)count; i < len; i++) {
       _allocated_traces->at_put(i, zero_trace);
     }
 
     // Set the array's length to the number of live elements.
-    _allocated_traces->trunc_to(curr_pos);
+    _allocated_traces->trunc_to((int)count);
   }
 
  log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" SIZE_FORMAT ")", count);
 }
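
For reference, the loop above is a standard in-place compaction: live entries are packed toward index 0 (possibly overwriting dead slots), dead entries are cleared and handed to the garbage lists, and the array is truncated to the live count. A standalone analogue (not HotSpot code):

    #include <vector>

    // Pack live entries to the front, report dead ones, then shrink to
    // the live count -- the same shape as the weak_oops_do() loop over
    // _allocated_traces, ending in trunc_to(count).
    template <typename T, typename IsAlive, typename OnDead>
    size_t compact(std::vector<T>& entries, IsAlive is_alive, OnDead on_dead) {
      size_t count = 0;
      for (size_t i = 0; i < entries.size(); i++) {
        if (is_alive(entries[i])) {
          entries[count++] = entries[i];  // may overwrite a dead slot
        } else {
          on_dead(entries[i]);            // cf. store_garbage_trace(trace)
        }
      }
      entries.resize(count);              // cf. trunc_to(count)
      return count;
    }
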
 