
src/hotspot/share/utilities/concurrentHashTable.inline.hpp

rev 52316 : 8185525: Add JFR event for DictionarySizes
Summary: Added TableStatistics event
Reviewed-by: egahlin, coleenp

@@ -478,10 +478,11 @@
   }
   // Publish the deletion.
   GlobalCounter::write_synchronize();
   delete_f(rem_n->value());
   Node::destroy_node(rem_n);
+  JFR_ONLY(_stats_rate.remove();)
   return true;
 }
 
 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
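
The JFR_ONLY(_stats_rate.remove();) statements added in the removal paths (and the matching _stats_rate.add() calls in the insertion paths below) are compiled in only when JFR support is built. They rely on the usual HotSpot *_ONLY conditional-compilation pattern; a sketch of that shape, assuming the real macro lives centrally in utilities/macros.hpp and is not part of this change:

  // Sketch of the conditional-compilation shape the JFR_ONLY calls assume;
  // the actual macro definition is not shown in this webrev.
  #if INCLUDE_JFR
  #define JFR_ONLY(code) code
  #else
  #define JFR_ONLY(code)
  #endif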

@@ -526,10 +527,11 @@
       write_synchonize_on_visible_epoch(thread);
     }
     for (size_t node_it = 0; node_it < nd; node_it++) {
       del_f(ndel[node_it]->value());
       Node::destroy_node(ndel[node_it]);
+      JFR_ONLY(_stats_rate.remove();)
       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
     }
     GlobalCounter::critical_section_begin(thread);
   }
   GlobalCounter::critical_section_end(thread);

@@ -562,10 +564,11 @@
   }
   if (dels > 0) {
     GlobalCounter::write_synchronize();
     for (size_t node_it = 0; node_it < dels; node_it++) {
       Node::destroy_node(ndel[node_it]);
+      JFR_ONLY(_stats_rate.remove();)
       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
     }
   }
 }
 

@@ -896,10 +899,11 @@
           new_node = Node::create_node(value_f(), first_at_start);
         } else {
           new_node->set_next(first_at_start);
         }
         if (bucket->cas_first(new_node, first_at_start)) {
+          JFR_ONLY(_stats_rate.add();)
           callback(true, new_node->value());
           new_node = NULL;
           ret = true;
           break; /* leave critical section */
         }

@@ -1002,10 +1006,11 @@
     : _new_table(NULL), _log2_start_size(log2size),
        _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
        _size_limit_reached(false), _resize_lock_owner(NULL),
        _invisible_epoch(0)
 {
+  _stats_rate = TableRateStatistics();
   _resize_lock =
     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
               Monitor::_safepoint_check_never);
   _table = new InternalTable(log2size);
   assert(log2size_limit >= log2size, "bad ergo");
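
The constructor now value-initializes _stats_rate together with the resize lock and the initial table, so a fresh table starts with zeroed insert/remove counters. The TableRateStatistics type itself is declared elsewhere; below is a minimal sketch of the interface the calls in this file assume (names, atomics and layout are illustrative, not taken from this change):

  #include <atomic>
  #include <cstddef>

  // Rough sketch only: two monotonically increasing counters that a sampler
  // (e.g. the JFR event code) can read and diff between samples.
  class TableRateStatisticsSketch {
    std::atomic<size_t> _added{0};
    std::atomic<size_t> _removed{0};
   public:
    void add()    { _added.fetch_add(1, std::memory_order_relaxed); }
    void remove() { _removed.fetch_add(1, std::memory_order_relaxed); }
    size_t added() const   { return _added.load(std::memory_order_relaxed); }
    size_t removed() const { return _removed.load(std::memory_order_relaxed); }
  };

HotSpot itself would use its own Atomic primitives rather than std::atomic; the point here is only the add()/remove() interface used by the table.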

@@ -1085,10 +1090,11 @@
   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
   Node* new_node = Node::create_node(value, bucket->first());
   if (!bucket->cas_first(new_node, bucket->first())) {
     assert(false, "bad");
   }
+  JFR_ONLY(_stats_rate.add();)
   return true;
 }
 
 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 template <typename SCAN_FUNC>

@@ -1139,21 +1145,15 @@
   unlock_resize_lock(thread);
 }
 
 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 template <typename VALUE_SIZE_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
-  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
-                outputStream* st, const char* table_name)
+inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+  statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
 {
   NumberSeq summary;
   size_t literal_bytes = 0;
-  if (!try_resize_lock(thread)) {
-    st->print_cr("statistics unavailable at this moment");
-    return;
-  }
-
   InternalTable* table = get_table();
   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
     ScopedCS cs(thread, this);
     size_t count = 0;
     Bucket* bucket = table->get_bucket(bucket_it);

@@ -1167,41 +1167,43 @@
       current_node = current_node->next();
     }
     summary.add((double)count);
   }
 
-  double num_buckets = summary.num();
-  double num_entries = summary.sum();
+  return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
+}
+
+template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename VALUE_SIZE_FUNC>
+inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+  statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
+{
+  if (!try_resize_lock(thread)) {
+    return old;
+  }
 
-  size_t bucket_bytes = num_buckets * sizeof(Bucket);
-  size_t entry_bytes  = num_entries * sizeof(Node);
-  size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
-
-  size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
-  size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
-
-  st->print_cr("%s statistics:", table_name);
-  st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
-               " bytes, each " SIZE_FORMAT,
-               (size_t)num_buckets, bucket_bytes,  bucket_size);
-  st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
-               " bytes, each " SIZE_FORMAT,
-               (size_t)num_entries, entry_bytes,   entry_size);
-  if (literal_bytes != 0) {
-    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
-    st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
-                 " bytes, avg %7.3f",
-                 (size_t)num_entries, literal_bytes, literal_avg);
-  }
-  st->print_cr("Total footprsize_t         : %9s = %9" PRIuPTR " bytes", ""
-               , total_bytes);
-  st->print_cr("Average bucket size     : %9.3f", summary.avg());
-  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
-  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
-  st->print_cr("Maximum bucket size     : %9" PRIuPTR,
-               (size_t)summary.maximum());
+  TableStatistics ts = statistics_calculate(thread, vs_f);
   unlock_resize_lock(thread);
+
+  return ts;
+}
+
+template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename VALUE_SIZE_FUNC>
+inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
+                outputStream* st, const char* table_name)
+{
+  if (!try_resize_lock(thread)) {
+    st->print_cr("statistics unavailable at this moment");
+    return;
+  }
+
+  TableStatistics ts = statistics_calculate(thread, vs_f);
+  unlock_resize_lock(thread);
+
+  ts.print(st, table_name);
 }
 
 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
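
The statistics refactoring above splits the work in three: statistics_calculate walks the buckets and packs the NumberSeq summary, literal bytes and the rate counters into a TableStatistics value; statistics_get takes the resize lock if it can and otherwise returns the caller-supplied previous snapshot, so a periodic sampler always has something to report; statistics_to keeps its old printing behaviour but delegates the formatting to TableStatistics::print. A minimal sketch of a caller of the new entry points, with illustrative names (sample_and_print, size_of_value) and an assumed value-size functor signature, neither of which is part of this change:

  // Illustrative only: refresh a cached snapshot and print the current state.
  template <typename CHT, typename VALUE>
  void sample_and_print(Thread* thread, CHT* table, outputStream* st,
                        TableStatistics& cached) {
    // Assumed functor shape: bytes attributed to one stored value.
    auto size_of_value = [] (VALUE* value) { return sizeof(*value); };
    // Returns 'cached' unchanged if the resize lock could not be taken.
    cached = table->statistics_get(thread, size_of_value, cached);
    // Prints a placeholder message instead if the lock is contended.
    table->statistics_to(thread, size_of_value, st, "Example table");
  }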