/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5  = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

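// A Bucket's _first pointer carries two state bits in its low-order bits
// (which is why POISON_PTR above keeps them clear): STATE_LOCK_BIT marks
// the bucket as locked for modification, and STATE_REDIRECT_BIT marks it
// as redirected to a resized table (a terminal state). set_state(),
// clear_state() and clear_set_state() apply and strip these bits.
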
// Node
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Node::next() const
{
  return OrderAccess::load_acquire(&_next);
}

// Bucket
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first_raw() const
{
  return OrderAccess::load_acquire(&_first);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(OrderAccess::load_acquire(&_first));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

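// Bucket locking is a CAS on _first that sets STATE_LOCK_BIT (see
// trylock() below). lock() spins with SpinPause() and yields the CPU
// every SPINPAUSES_PER_YIELD attempts; SpinYield's escalating back-off
// would be unfair in this loop, hence the manual spin.
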
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On contemporary OSes yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_last_node_next(
     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
                    )
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  OrderAccess::release_store(&_first, clear_state(first()));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  _buckets = new (memory) Bucket[_size];
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

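// The ScopedCS below is the read-side critical section: it brackets a
// table access with GlobalCounter::critical_section_begin()/end(). The
// constructor also clears _invisible_epoch, which tells a concurrent
// bulk operation that the current table version has been observed by a
// reader (see write_synchonize_on_visible_epoch()).
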
// ScopedCS
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _thread(thread), _cht(cht)
{
  GlobalCounter::critical_section_begin(_thread);
  // This version is published now.
  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread);
}

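// The CONFIG parameter supplies hashing and node allocation; BaseConfig
// below provides default C-heap allocation. A minimal client sketch,
// using the hypothetical names MyValue/MyConfig (not part of this file):
//
//   class MyConfig : public ConcurrentHashTable<MyValue, MyConfig,
//                                               mtInternal>::BaseConfig {
//    public:
//     static uintx get_hash(const MyValue& value, bool* is_dead) {
//       *is_dead = false; // Entries never die in this example.
//       return value.hash();
//     }
//     static const MyValue& notfound() { // Returned by get_copy() on miss.
//       static MyValue not_found;
//       return not_found;
//     }
//   };
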
// BaseConfig
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::allocate_node(size_t size, const VALUE& value)
{
  return AllocateHeap(size, F);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::free_node(void* memory, const VALUE& value)
{
  FreeHeap(memory);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

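// HaveDeletables is specialized on whether VALUE is a pointer type: the
// pointer instantiation (true) can prefetch the pointed-to values while
// walking a chain, the generic fallback cannot.
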
// HaveDeletables
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes this prefetch gives around a 30% speed-up.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    if (next->next() != NULL) {
      Prefetch::read(*next->next()->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

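// Bulk operations (resize and bulk delete) must wait for readers of an
// old table version to leave before nodes can be moved or freed.
// write_synchonize_on_visible_epoch() below avoids the expensive
// GlobalCounter::write_synchronize() when no reader ever saw the
// current version: the bulk operation publishes its own Thread* in
// _invisible_epoch, and any reader entering a ScopedCS zeroes it.
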
// ConcurrentHashTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  OrderAccess::fence(); // Prevent the below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark this/next version that we are synchronizing on as not published.
  // A reader will zero this flag if it reads this/next version.
  OrderAccess::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  // If the lock is held by some other thread, the chances that it is released
  // quickly are low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // can be unlocked while _resize_lock_owner still contains the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while (true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_table() const
{
  return OrderAccess::load_acquire(&_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_new_table() const
{
  return OrderAccess::load_acquire(&_new_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  OrderAccess::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}

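// Growing doubles the bucket count. Each old bucket has two sibling
// buckets in the new table: the "even" one at the same index and the
// "odd" one at index + old size (one extra hash bit). The old chain is
// split in place between the siblings by unzip_bucket(), while readers
// are forwarded to the new table via the redirect state.
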
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. We call the siblings even/odd, where even has high bit 0 and odd
  // has high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Lockers are moved to the new table, where they will wait until unlock()
    // below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into the corresponding
    // buckets in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the
      // bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  // Here we have the resize lock, so the table is SMR safe and there is no
  // new table. This could be done in parallel if we wanted to.
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= table->_size, "Must be");
  // Here we do the critical section manually, since we don't want to take the
  // cost of locking a bucket if there is nothing to delete. But we can have
  // concurrent single deletes. The _invisible_epoch can only be used by the
  // owner of the _resize_lock, i.e. us. Therefore we must not change it on
  // our own read-side.
  GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
      // Nothing to remove in this bucket.
      continue;
    }

    GlobalCounter::critical_section_end(thread);
    // We left the critical section, but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    write_synchonize_on_visible_epoch(thread);
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      rem_n = rem_n->next();
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while (true) {
    {
      // We need a critical section to protect the table itself, but if we
      // fail to lock the bucket we must leave the critical section, otherwise
      // we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On contemporary OSes yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

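// unzip_bucket() below walks the old chain, and for every node detaches
// the sibling chain the node does not belong to, synchronizing with
// readers after each moved pointer. Moving more than one pointer per
// synchronization could hand a concurrent reader over to the wrong
// chain, see the comment at the bottom of the loop.
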
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket, and in debug builds we poison the first ptr in
    // the bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to next.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux->next());
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux->next());
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This node is even, so move the odd list to aux's next.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux->next());
        // Keep it in the even list.
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This node is odd, so move the even list to aux's next.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux->next());
        // Keep it in the odd list.
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux->next();

    // We can only move one pointer per synchronization, otherwise a reader
    // might be moved to the wrong chain, e.g. one looking for an even hash
    // value could get moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }

  assert(_resize_lock->owned_by_self(), "Re-size lock not held");

  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size - 1);

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  assert(_resize_lock_owner, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put the chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(!_resize_lock->owned_by_self(), "Re-size lock held");
    return false;
  }
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double-checking of _size_limit_reached/
  // is_max_size_reached() in the grow path, since grow means a high load on
  // the table while shrink means a low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  assert(_resize_lock_owner, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(!_resize_lock->owned_by_self(), "Re-size lock held");
    return false;
  }
  assert(_resize_lock->owned_by_self(), "Re-size lock not held");
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  return true;
}

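// The LOOKUP_FUNC used by get/insert/remove must provide get_hash() and
// equals(). A minimal sketch, using the hypothetical names
// MyLookup/MyValue (not part of this file):
//
//   class MyLookup : public StackObj {
//     const MyValue& _key;
//    public:
//     MyLookup(const MyValue& key) : _key(key) {}
//     uintx get_hash() const { return _key.hash(); }
//     bool equals(const MyValue* value, bool* is_dead) {
//       *is_dead = false; // Set to flag entries for lazy removal.
//       return *value == _key;
//     }
//   };
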
// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

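// Insertion is lock-free: the new node is CASed in as the bucket's
// first node. If the CAS fails because the bucket is locked (e.g. by a
// resize or a bulk delete) we yield before retrying, otherwise we just
// pause; the node allocated on the first attempt is reused on retries.
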
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                  CALLBACK_FUNC& callback, bool* grow_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked = false;
  size_t loops = 0;
  size_t i = 0;
  Node* new_node = NULL;
  uintx hash = lookup_f.get_hash();
  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);

      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        // No duplicate found.
        if (new_node == NULL) {
          new_node = Node::create_node(value_f(), first_at_start);
        } else {
          new_node->set_next(first_at_start);
        }
        if (bucket->cas_first(new_node, first_at_start)) {
          callback(true, new_node->value());
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed, we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        callback(false, old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // The CAS failed and a duplicate was inserted, we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    assert(bucket->is_locked(), "Must be locked.");
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock->owned_by_self() ||
         (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
         "Re-size lock not held or not VMThread at safepoint");
  // We could do a critical section over the entire loop, but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      rem_n = rem_n->next();
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_start_size(log2size),
      _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
              Monitor::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
  get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  return val != NULL ? *val : CONFIG::notfound();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unsafe_insert(const VALUE& value)
{
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  bool vm_and_safepoint = thread->is_VM_thread() &&
                          SafepointSynchronize::is_at_safepoint();
  if (!vm_and_safepoint && !try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  if (!vm_and_safepoint) {
    unlock_resize_lock(thread);
  }
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    assert(!_resize_lock->owned_by_self(), "Re-size lock held");
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(!_resize_lock->owned_by_self(), "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  if ((thread->is_VM_thread() && !SafepointSynchronize::is_at_safepoint()) ||
      (!thread->is_VM_thread() && !try_resize_lock(thread))) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  double num_buckets = summary.num();
  double num_entries = summary.sum();

  size_t bucket_bytes = num_buckets * sizeof(Bucket);
  size_t entry_bytes  = num_entries * sizeof(Node);
  size_t total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_buckets, bucket_bytes,  bucket_size);
  st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_entries, entry_bytes,   entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
                 " bytes, avg %7.3f",
                 (size_t)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", "",
               total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9" PRIuPTR,
               (size_t)summary.maximum());
  if (!thread->is_VM_thread()) {
    unlock_resize_lock(thread);
  }
}

#endif // include guard