/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5  = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif
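
// Note: in debug builds, pointers that must no longer be followed are
// overwritten with POISON_PTR (see the DEBUG_ONLY uses below), so a stale
// reader asserts or crashes instead of silently walking freed or migrated
// nodes. The poison value keeps the two low bits clear, since those bits are
// reserved for the bucket state bits.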

// Node
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Node::next() const
{
  return OrderAccess::load_acquire(&_next);
}

// Bucket
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first_raw() const
{
  return OrderAccess::load_acquire(&_first);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(OrderAccess::load_acquire(&_first));
}
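
// Note on the pointer tagging used above: the two low bits of _first carry
// the bucket state (STATE_LOCK_BIT and STATE_REDIRECT_BIT) while the
// remaining bits are the first Node*. The helpers set_state()/clear_state()/
// is_state(), declared in concurrentHashTable.hpp, pack and unpack these
// bits. A rough sketch of how a raw first pointer is interpreted:
//
//   Node* raw    = first_raw();                       // may carry state bits
//   Node* node   = clear_state(raw);                  // usable Node*
//   bool  locked = is_state(raw, STATE_LOCK_BIT);     // bucket lock taken?
//   bool  moved  = is_state(raw, STATE_REDIRECT_BIT); // bucket migrated?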

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_last_node_next(
     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
                    )
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  OrderAccess::release_store(&_first, clear_state(first()));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
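
// Note: redirect() is terminal for a bucket. Once STATE_REDIRECT_BIT is set
// the bucket is never unlocked again (see the assert in unlock() above);
// readers and lockers that observe the bit follow get_bucket() into the new
// table instead. This is what lets a resize migrate a bucket while lookups
// are still running against the old table.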

// InternalTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  _buckets = new (memory) Bucket[_size];
}
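
// Worked example of the sizing above: log2_size == 5 gives _size == 32
// buckets and _hash_mask == 0x1f, so a bucket index is derived by masking the
// hash with _hash_mask. Growing by one doubles _size and widens the mask by
// one bit; that extra bit is what internal_grow_range() below uses to split
// a bucket into its even/odd siblings.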

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _thread(thread), _cht(cht)
{
  GlobalCounter::critical_section_begin(_thread);
  // This version is published now.
  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread);
}
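
// ScopedCS is the read-side critical section used by the lookup paths in
// this file (see internal_get() and internal_insert()). A minimal usage
// sketch, as it appears inside this class (illustration only):
//
//   {
//     ScopedCS cs(thread, this);        // enter GlobalCounter critical section
//     Bucket* bucket = get_bucket(hash);
//     Node* node = get_node(bucket, lookup_f, &clean, &loops);
//     ...
//   }                                   // destructor ends the section
//
// The release_store_fence of _invisible_epoch in the constructor is what
// allows write_synchonize_on_visible_epoch() below to skip the global
// handshake when no reader has entered a critical section since the last
// synchronize.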

// BaseConfig
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::allocate_node(size_t size, const VALUE& value)
{
  return AllocateHeap(size, F);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::free_node(void* memory, const VALUE& value)
{
  FreeHeap(memory);
}
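
// The CONFIG parameter supplies the hashing and allocation policy. From the
// uses in this file it must at least provide get_hash(value, &is_dead) and
// notfound(), typically by deriving from BaseConfig above. A minimal sketch
// with assumed names and signatures inferred from the call sites here,
// illustration only:
//
//   class MyConfig : public ConcurrentHashTable<MyValue, MyConfig, mtInternal>::BaseConfig {
//    public:
//     static size_t get_hash(const MyValue& v, bool* is_dead) {
//       *is_dead = false;          // no dead entries in this example
//       return v.hash();           // assumed hash accessor
//     }
//     static const MyValue& notfound() {
//       static MyValue nf;         // sentinel returned by get_copy() on miss
//       return nf;
//     }
//   };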

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

// HaveDeletables
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives around a 30% speed-up.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once.  May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}
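
// Note: which of the two have_deletable() overloads is used is decided by
// IsPointer<VALUE>::value at the call site in do_bulk_delete_locked_for(),
// so the prefetching variant above is only instantiated when VALUE is a
// pointer type and *value() can be prefetched cheaply.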

// ConcurrentHashTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark the version we are synchronizing on as not published.
  // A reader will zero this flag if it reads this/next version.
  OrderAccess::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}
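
// How the _invisible_epoch optimization works, in short: a bulk operation
// (resize or bulk delete) stores its own Thread* into _invisible_epoch
// before calling GlobalCounter::write_synchronize(). Any reader that enters
// a ScopedCS afterwards zeroes the field again. So if the field still holds
// this thread on the next call, no reader can have observed the epoch in
// between and the expensive write_synchronize() can be skipped.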

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chance that it is released
  // quickly is low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked but _resize_lock_owner will still contain the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_table() const
{
  return OrderAccess::load_acquire(&_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_new_table() const
{
  return OrderAccess::load_acquire(&_new_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  OrderAccess::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}
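
// Note on the ordering above: the release_store publishes _new_table as
// _table, and the following write_synchronize() guarantees that no reader is
// still reaching buckets through the old _table/_new_table pointers before
// _new_table is cleared (and poisoned in debug builds) and the old table is
// handed back to the caller for deletion.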

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here, hence all buckets in the new table will be
  // locked. We call the sibling buckets even/odd, where even have the high
  // bit 0 and odd have the high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Threads now locking the old bucket are redirected to the new table,
    // where they will wait until the unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into the corresponding
    // buckets in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If bucket is empty, unzip does nothing.
      // We must make sure readers go to new table before we poison the bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}
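
// Worked example of the even/odd split above: growing from 2^5 to 2^6
// buckets, old bucket 5 becomes even bucket 5 and odd bucket 37 (5 + 32) in
// the new table. Both new buckets start as copies of the old (locked) bucket,
// the old bucket is redirected, and unzip_bucket() then distributes the nodes
// between bucket 5 and bucket 37 according to the extra hash bit that the
// larger table uses.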

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  return true;
}
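
// Note: the write_synchronize() above is what makes the removal safe. The
// node is unlinked while the bucket is locked, but concurrent readers may
// still hold a pointer to it from an earlier bucket walk; only after all
// readers have left their critical sections is it safe to run delete_f and
// destroy the node.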

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we have the resize lock so the table is SMR safe, and there is no new
  // table. This can be done in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we handle the critical section manually, since we don't want to take
  // the cost of locking the bucket if there is nothing to delete. But we can
  // have concurrent single deletes. The _invisible_epoch can only be used by
  // the owner of _resize_lock, which is us here. Therefore we must not change
  // it on our own read-side.
  GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
        // Nothing to remove in this bucket.
        continue;
    }

    GlobalCounter::critical_section_end(thread);
    // We left the critical section but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  assert(bucket->is_locked(), "Must be locked.");

  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself, but if we fail
      // to lock the bucket we must leave the critical section, otherwise we
      // would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the
    // bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to its next.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is an even node, so move the odd chain past it to aux's next.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is an odd node, so move the even chain past it to aux's next.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move one pointer at a time, otherwise a reader might be
    // moved to the wrong chain, e.g. looking for an even hash value but
    // getting moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}
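
// Worked example of the chain joining above: shrinking from 2^6 to 2^5
// buckets, new bucket 5 is seeded from old even bucket 5 and then gets the
// chain of old odd bucket 37 (5 + 32) appended to its last node, so every
// node whose hash previously mapped to either sibling now maps to the single
// smaller bucket.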

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double-checking of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means high load on the table while shrink
  // means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                  CALLBACK_FUNC& callback, bool* grow_hint, bool* clean_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  Node* new_node = NULL;
  uintx hash = lookup_f.get_hash();
  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);

      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        // No duplicate found.
        if (new_node == NULL) {
          new_node = Node::create_node(value_f(), first_at_start);
        } else {
          new_node->set_next(first_at_start);
        }
        if (bucket->cas_first(new_node, first_at_start)) {
          callback(true, new_node->value());
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed, we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        callback(false, old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted, we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();

    clean = false;
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  if (clean_hint != NULL) {
    *clean_hint = clean;
  }

  return ret;
}
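
// Note on the retry loop above: the insert itself is lock-free, a single
// cas_first() on the bucket. The CAS can only fail because another insert won
// the race or because the bucket got locked (single delete or resize in
// progress); in the first case we just spin and retry, in the second we yield
// since the lock holder may need the CPU to make progress. Cleaning of dead
// entries is only attempted when the insert succeeded on the first try
// (i == 0), to keep the common path cheap.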

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We can do a critical section over the entire loop but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_start_size(log2size),
       _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
       _size_limit_reached(false), _resize_lock_owner(NULL),
       _invisible_epoch(0)
{
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
              Monitor::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}
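
// A minimal construction sketch (type names are placeholders, illustration
// only): a table that starts at 2^16 buckets, may grow to 2^24, and reports
// a grow hint once a lookup walks more than 4 nodes in a bucket:
//
//   ConcurrentHashTable<MyValue, MyConfig, mtInternal>* cht =
//     new ConcurrentHashTable<MyValue, MyConfig, mtInternal>(16, 24, 4);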

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
  get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  return val != NULL ? *val : CONFIG::notfound();
}
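
// The LOOKUP_FUNC used by get()/get_copy() must provide get_hash() and
// equals(VALUE*, bool* is_dead), as used throughout this file. A minimal
// sketch with assumed names, for illustration only:
//
//   class MyLookup : public StackObj {
//     const Key _key;
//    public:
//     MyLookup(const Key& key) : _key(key) {}
//     uintx get_hash() const { return _key.hash(); }
//     bool equals(const MyValue* value, bool* is_dead) const {
//       *is_dead = false;               // this example has no dead entries
//       return value->matches(_key);    // assumed comparison helper
//     }
//   };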

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
        continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  double num_buckets = summary.num();
  double num_entries = summary.sum();

  size_t bucket_bytes = num_buckets * sizeof(Bucket);
  size_t entry_bytes  = num_entries * sizeof(Node);
  size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;

  size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_buckets, bucket_bytes,  bucket_size);
  st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_entries, entry_bytes,   entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
                 " bytes, avg %7.3f",
                 (size_t)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", ""
               , total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9" PRIuPTR,
               (size_t)summary.maximum());
  unlock_resize_lock(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(), "No state bits should be set");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // include guard