1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
  26 #define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
  27 
  28 #include "memory/allocation.inline.hpp"
  29 #include "runtime/atomic.hpp"
  30 #include "runtime/orderAccess.inline.hpp"
  31 #include "runtime/prefetch.inline.hpp"
  32 #include "utilities/concurrentHashTable.hpp"
  33 #include "utilities/globalCounter.inline.hpp"
  34 #include "utilities/numberSeq.hpp"
  35 #include "utilities/spinYield.hpp"
  36 
  37 // 2^30 = 1G buckets
  38 #define SIZE_BIG_LOG2 30
  39 // 2^5  = 32 buckets
  40 #define SIZE_SMALL_LOG2 5
  41 
  42 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
  43 #define SPINPAUSES_PER_YIELD 8192
  44 
  45 #ifdef ASSERT
  46 #ifdef _LP64
  47 // Two low bits are not usable.
  48 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
  49 #else
  50 // Two low bits are not usable.
  51 static const void* POISON_PTR = (void*)0xffbadbac;
  52 #endif
  53 #endif
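
// The two low bits of the Node* stored in a Bucket's _first field carry the
// bucket state: a lock bit and a redirect bit (see the STATE_* constants in
// concurrentHashTable.hpp). That is why those bits are "not usable" as
// pointer bits, and why POISON_PTR is chosen with both of them clear.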
  54 
  55 // Node
  56 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  57 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  58 ConcurrentHashTable<VALUE, CONFIG, F>::
  59   Node::next() const
  60 {
  61   return OrderAccess::load_acquire(&_next);
  62 }
  63 
  64 // Bucket
  65 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  66 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  67 ConcurrentHashTable<VALUE, CONFIG, F>::
  68   Bucket::first_raw() const
  69 {
  70   return OrderAccess::load_acquire(&_first);
  71 }
  72 
  73 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  74 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  75   Bucket::release_assign_node_ptr(
  76     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
  77     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
  78 {
  // Due to this assert, this method is not static.
  80   assert(is_locked(), "Must be locked.");
  81   Node** tmp = (Node**)dst;
  82   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
  83 }
  84 
  85 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  86 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  87 ConcurrentHashTable<VALUE, CONFIG, F>::
  88   Bucket::first() const
  89 {
  // We strip the state bits before returning the ptr.
  91   return clear_state(OrderAccess::load_acquire(&_first));
  92 }
  93 
  94 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  95 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  96   Bucket::have_redirect() const
  97 {
  98   return is_state(first_raw(), STATE_REDIRECT_BIT);
  99 }
 100 
 101 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 102 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 103   Bucket::is_locked() const
 104 {
 105   return is_state(first_raw(), STATE_LOCK_BIT);
 106 }
 107 
 108 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 109 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 110   Bucket::lock()
 111 {
 112   int i = 0;
 113   // SpinYield would be unfair here
 114   while (!this->trylock()) {
 115     if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
 118       os::naked_yield();
 119       i = 0;
 120     } else {
 121       SpinPause();
 122     }
 123   }
 124 }
 125 
 126 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 127 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 128   Bucket::release_assign_last_node_next(
 129      typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
 130 {
 131   assert(is_locked(), "Must be locked.");
 132   Node* const volatile * ret = first_ptr();
 133   while (clear_state(*ret) != NULL) {
 134     ret = clear_state(*ret)->next_ptr();
 135   }
 136   release_assign_node_ptr(ret, node);
 137 }
 138 
 139 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 140 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 141   Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
 142                     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
 143                     )
 144 {
 145   if (is_locked()) {
 146     return false;
 147   }
 148   if (Atomic::cmpxchg(node, &_first, expect) == expect) {
 149     return true;
 150   }
 151   return false;
 152 }
 153 
 154 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 155 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 156   Bucket::trylock()
 157 {
 158   if (is_locked()) {
 159     return false;
 160   }
 161   // We will expect a clean first pointer.
 162   Node* tmp = first();
 163   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
 164     return true;
 165   }
 166   return false;
 167 }
 168 
 169 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 170 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 171   Bucket::unlock()
 172 {
 173   assert(is_locked(), "Must be locked.");
 174   assert(!have_redirect(),
 175          "Unlocking a bucket after it has reached terminal state.");
 176   OrderAccess::release_store(&_first, clear_state(first()));
 177 }
 178 
 179 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 180 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 181   Bucket::redirect()
 182 {
 183   assert(is_locked(), "Must be locked.");
 184   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 185 }
 186 
 187 // InternalTable
 188 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 189 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 190   InternalTable::InternalTable(size_t log2_size)
 191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
 192       _hash_mask(~(~((size_t)0) << _log2_size))
 193 {
 194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
 195          "Bad size");
 196   void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
 197   _buckets = new (memory) Bucket[_size];
 198 }
 199 
 200 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 201 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 202   InternalTable::~InternalTable()
 203 {
 204   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 205 }
 206 
 207 // ScopedCS
 208 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 209 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 210   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
 211     : _thread(thread), _cht(cht)
 212 {
 213   GlobalCounter::critical_section_begin(_thread);
 214   // This version is published now.
 215   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
 216     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
 217   }
 218 }
 219 
 220 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 221 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 222   ScopedCS::~ScopedCS()
 223 {
 224   GlobalCounter::critical_section_end(_thread);
 225 }
 226 
 227 // BaseConfig
 228 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 229 inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
 230   BaseConfig::allocate_node(size_t size, const VALUE& value)
 231 {
 232   return AllocateHeap(size, F);
 233 }
 234 
 235 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 236 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 237   BaseConfig::free_node(void* memory, const VALUE& value)
 238 {
 239   FreeHeap(memory);
 240 }
 241 
 242 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 243 template <typename LOOKUP_FUNC>
 244 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
 245   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 246 {
 247   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 248 }
 249 
 250 // HaveDeletables
 251 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 252 template <typename EVALUATE_FUNC>
 253 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 254   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 255                                                       EVALUATE_FUNC& eval_f,
 256                                                       Bucket* prefetch_bucket)
 257 {
 258   // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives roughly a 30% speedup.
 260   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
 261   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 262     if (pref != NULL) {
 263       Prefetch::read(*pref->value(), 0);
 264       pref = pref->next();
 265     }
 266     if (next->next() != NULL) {
 267       Prefetch::read(*next->next()->value(), 0);
 268     }
 269     if (eval_f(next->value())) {
 270       return true;
 271     }
 272   }
 273   return false;
 274 }
 275 
 276 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 277 template <bool b, typename EVALUATE_FUNC>
 278 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 279   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 280                                                    EVALUATE_FUNC& eval_f,
 281                                                    Bucket* preb)
 282 {
 283   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 284     if (eval_f(next->value())) {
 285       return true;
 286     }
 287   }
 288   return false;
 289 }
 290 
 291 // ConcurrentHashTable
 292 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 293 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 294   write_synchonize_on_visible_epoch(Thread* thread)
 295 {
 296   assert(_resize_lock_owner == thread, "Re-size lock not held");
 297   OrderAccess::fence(); // Prevent below load from floating up.
 298   // If no reader saw this version we can skip write_synchronize.
 299   if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
 300     return;
 301   }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark this/the next version that we are synchronizing on as not
  // published. A reader will zero this flag if it reads this/the next version.
 305   OrderAccess::release_store(&_invisible_epoch, thread);
 306   GlobalCounter::write_synchronize();
 307 }
 308 
 309 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 310 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 311   try_resize_lock(Thread* locker)
 312 {
 313   if (_resize_lock->try_lock()) {
 314     if (_resize_lock_owner != NULL) {
 315       assert(locker != _resize_lock_owner, "Already own lock");
 316       // We got mutex but internal state is locked.
 317       _resize_lock->unlock();
 318       return false;
 319     }
 320   } else {
 321     return false;
 322   }
 323   _invisible_epoch = 0;
 324   _resize_lock_owner = locker;
 325   return true;
 326 }
 327 
 328 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 329 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 330   lock_resize_lock(Thread* locker)
 331 {
 332   size_t i = 0;
  // If the lock is held by some other thread, the chance that it is released
  // quickly is low, so we prefer yielding.
 335   SpinYield yield(1, 512);
 336   do {
 337     _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked but _resize_lock_owner will still contain the owner.
 340     if (_resize_lock_owner != NULL) {
 341       assert(locker != _resize_lock_owner, "Already own lock");
 342       // We got mutex but internal state is locked.
 343       _resize_lock->unlock();
 344       yield.wait();
 345     } else {
 346       break;
 347     }
 348   } while(true);
 349   _resize_lock_owner = locker;
 350   _invisible_epoch = 0;
 351 }
 352 
 353 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 354 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 355   unlock_resize_lock(Thread* locker)
 356 {
 357   _invisible_epoch = 0;
 358   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
 359   _resize_lock_owner = NULL;
 360   _resize_lock->unlock();
 361 }
 362 
 363 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 364 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 365   free_nodes()
 366 {
 367   // We assume we are not MT during freeing.
 368   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
 369     Bucket* bucket = _table->get_buckets() + node_it;
 370     Node* node = bucket->first();
 371     while (node != NULL) {
 372       Node* free_node = node;
 373       node = node->next();
 374       Node::destroy_node(free_node);
 375     }
 376   }
 377 }
 378 
 379 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 380 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 381 ConcurrentHashTable<VALUE, CONFIG, F>::
 382   get_table() const
 383 {
 384   return OrderAccess::load_acquire(&_table);
 385 }
 386 
 387 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 388 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 389 ConcurrentHashTable<VALUE, CONFIG, F>::
 390   get_new_table() const
 391 {
 392   return OrderAccess::load_acquire(&_new_table);
 393 }
 394 
 395 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 396 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 397 ConcurrentHashTable<VALUE, CONFIG, F>::
 398   set_table_from_new()
 399 {
 400   InternalTable* old_table = _table;
 401   // Publish the new table.
 402   OrderAccess::release_store(&_table, _new_table);
 403   // All must see this.
 404   GlobalCounter::write_synchronize();
  // _new_table is not read any more.
 406   _new_table = NULL;
 407   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
 408   return old_table;
 409 }
 410 
 411 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 412 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 413   internal_grow_range(Thread* thread, size_t start, size_t stop)
 414 {
 415   assert(stop <= _table->_size, "Outside backing array");
 416   assert(_new_table != NULL, "Grow not proper setup before start");
  // The state is also copied here, hence all buckets in the new table will be
  // locked. We call the sibling buckets odd/even, where the even bucket has
  // high bit 0 and the odd bucket has high bit 1.
 420   for (size_t even_index = start; even_index < stop; even_index++) {
 421     Bucket* bucket = _table->get_bucket(even_index);
 422 
 423     bucket->lock();
 424 
 425     size_t odd_index = even_index + _table->_size;
 426     _new_table->get_buckets()[even_index] = *bucket;
 427     _new_table->get_buckets()[odd_index] = *bucket;
 428 
    // Makes lockers go to the new table, where they will wait until the
    // unlock() below.
 430     bucket->redirect(); /* Must release stores above */
 431 
 432     // When this is done we have separated the nodes into corresponding buckets
 433     // in new table.
 434     if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the bucket.
 437       DEBUG_ONLY(GlobalCounter::write_synchronize();)
 438     }
 439 
 440     // Unlock for writes into the new table buckets.
 441     _new_table->get_bucket(even_index)->unlock();
 442     _new_table->get_bucket(odd_index)->unlock();
 443 
 444     DEBUG_ONLY(
 445        bucket->release_assign_node_ptr(
 446           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
 447     )
 448   }
 449 }
 450 
 451 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 452 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
 453 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 454   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
 455 {
 456   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 457   assert(bucket->is_locked(), "Must be locked.");
 458   Node* const volatile * rem_n_prev = bucket->first_ptr();
 459   Node* rem_n = bucket->first();
 460   bool have_dead = false;
 461   while (rem_n != NULL) {
 462     if (lookup_f.equals(rem_n->value(), &have_dead)) {
 463       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 464       break;
 465     } else {
 466       rem_n_prev = rem_n->next_ptr();
 467       rem_n = rem_n->next();
 468     }
 469   }
 470 
 471   bucket->unlock();
 472 
 473   if (rem_n == NULL) {
 474     return false;
 475   }
 476   // Publish the deletion.
 477   GlobalCounter::write_synchronize();
 478   delete_f(rem_n->value());
 479   Node::destroy_node(rem_n);
 480   return true;
 481 }
 482 
 483 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 484 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 485 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 486   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
 487                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 488 {
  // Here we hold the resize lock, so the table is SMR safe and there is no
  // new table. This could be done in parallel if we wanted.
 491   assert(_resize_lock_owner == thread, "Re-size lock not held");
 492   Node* ndel[BULK_DELETE_LIMIT];
 493   InternalTable* table = get_table();
 494   assert(start_idx < stop_idx, "Must be");
 495   assert(stop_idx <= _table->_size, "Must be");
  // Here we handle the critical section manually, since we don't want to
  // take the cost of locking the bucket if there is nothing to delete. But we
  // can have concurrent single deletes. The _invisible_epoch can only be used
  // by the owner of the _resize_lock, which is us here. Therefore we should
  // not change it in our own read-side section.
 501   GlobalCounter::critical_section_begin(thread);
 502   for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
 503     Bucket* bucket = table->get_bucket(bucket_it);
 504     Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
 505                               table->get_bucket(bucket_it+1) : NULL;
 506 
 507     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
 508         have_deletable(bucket, eval_f, prefetch_bucket)) {
 509         // Nothing to remove in this bucket.
 510         continue;
 511     }
 512 
 513     GlobalCounter::critical_section_end(thread);
 514     // We left critical section but the bucket cannot be removed while we hold
 515     // the _resize_lock.
 516     bucket->lock();
 517     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
 518     bucket->unlock();
 519     write_synchonize_on_visible_epoch(thread);
 520     for (size_t node_it = 0; node_it < nd; node_it++) {
 521       del_f(ndel[node_it]->value());
 522       Node::destroy_node(ndel[node_it]);
 523       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 524     }
 525     GlobalCounter::critical_section_begin(thread);
 526   }
 527   GlobalCounter::critical_section_end(thread);
 528 }
 529 
 530 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 531 template <typename LOOKUP_FUNC>
 532 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 533   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 534 {
 535   size_t dels = 0;
 536   Node* ndel[BULK_DELETE_LIMIT];
 537   Node* const volatile * rem_n_prev = bucket->first_ptr();
 538   Node* rem_n = bucket->first();
 539   while (rem_n != NULL) {
 540     bool is_dead = false;
 541     lookup_f.equals(rem_n->value(), &is_dead);
 542     if (is_dead) {
 543       ndel[dels++] = rem_n;
 544       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 545       rem_n = rem_n->next();
 546       if (dels == BULK_DELETE_LIMIT) {
 547         break;
 548       }
 549     } else {
 550       rem_n_prev = rem_n->next_ptr();
 551       rem_n = rem_n->next();
 552     }
 553   }
 554   if (dels > 0) {
 555     GlobalCounter::write_synchronize();
 556     for (size_t node_it = 0; node_it < dels; node_it++) {
 557       Node::destroy_node(ndel[node_it]);
 558       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 559     }
 560   }
 561 }
 562 
 563 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 564 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 565 ConcurrentHashTable<VALUE, CONFIG, F>::
 566   get_bucket(uintx hash) const
 567 {
 568   InternalTable* table = get_table();
 569   Bucket* bucket = get_bucket_in(table, hash);
 570   if (bucket->have_redirect()) {
 571     table = get_new_table();
 572     bucket = get_bucket_in(table, hash);
 573   }
 574   return bucket;
 575 }
 576 
 577 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 578 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 579 ConcurrentHashTable<VALUE, CONFIG, F>::
 580   get_bucket_locked(Thread* thread, const uintx hash)
 581 {
 582   Bucket* bucket;
 583   int i = 0;
 584   // SpinYield would be unfair here
 585   while(true) {
 586     {
      // We need a critical section to protect the table itself. But if we fail
      // to take the bucket lock we must leave the critical section, otherwise
      // we would deadlock.
 589       ScopedCS cs(thread, this);
 590       bucket = get_bucket(hash);
 591       if (bucket->trylock()) {
 592         break; /* ends critical section */
 593       }
 594     } /* ends critical section */
 595     if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
 598       os::naked_yield();
 599       i = 0;
 600     } else {
 601       SpinPause();
 602     }
 603   }
 604   return bucket;
 605 }
 606 
 607 // Always called within critical section
 608 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 609 template <typename LOOKUP_FUNC>
 610 typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
 611 ConcurrentHashTable<VALUE, CONFIG, F>::
 612   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
 613            bool* have_dead, size_t* loops) const
 614 {
 615   size_t loop_count = 0;
 616   Node* node = bucket->first();
 617   while (node != NULL) {
 618     bool is_dead = false;
 619     ++loop_count;
 620     if (lookup_f.equals(node->value(), &is_dead)) {
 621       break;
 622     }
 623     if (is_dead && !(*have_dead)) {
 624       *have_dead = true;
 625     }
 626     node = node->next();
 627   }
 628   if (loops != NULL) {
 629     *loops = loop_count;
 630   }
 631   return node;
 632 }
 633 
 634 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 635 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 636   unzip_bucket(Thread* thread, InternalTable* old_table,
 637                InternalTable* new_table, size_t even_index, size_t odd_index)
 638 {
 639   Node* aux = old_table->get_bucket(even_index)->first();
 640   if (aux == NULL) {
    // This is an empty bucket, and in debug builds we poison the first ptr in
    // the bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
 644     return false;
 645   }
 646   Node* delete_me = NULL;
 647   Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
 648   Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
 649   while (aux != NULL) {
 650     bool dead_hash = false;
 651     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
 652     if (dead_hash) {
 653       delete_me = aux;
      // This item is dead, move both lists to its next
 655       new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 656                                                                 aux->next());
 657       new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 658                                                                  aux->next());
 659     } else {
 660       size_t aux_index = bucket_idx_hash(new_table, aux_hash);
 661       if (aux_index == even_index) {
        // This is an even node, so move the odd list to aux's next
 663         new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 664                                                                   aux->next());
 665         // Keep in even list
 666         even = aux->next_ptr();
 667       } else if (aux_index == odd_index) {
        // This is an odd node, so move the even list to aux's next
 669         new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 670                                                                    aux->next());
 671         // Keep in odd list
 672         odd = aux->next_ptr();
 673       } else {
 674         fatal("aux_index does not match even or odd indices");
 675       }
 676     }
 677     aux = aux->next();
 678 
    // We can only move one pointer per iteration, otherwise a reader might be
    // moved to the wrong chain, e.g. looking for an even hash value but
    // getting moved to the odd bucket chain.
 682     write_synchonize_on_visible_epoch(thread);
 683     if (delete_me != NULL) {
 684       Node::destroy_node(delete_me);
 685       delete_me = NULL;
 686     }
 687   }
 688   return true;
 689 }
 690 
 691 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 692 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 693   internal_shrink_prolog(Thread* thread, size_t log2_size)
 694 {
 695   if (!try_resize_lock(thread)) {
 696     return false;
 697   }
 698   assert(_resize_lock_owner == thread, "Re-size lock not held");
 699   if (_table->_log2_size == _log2_start_size ||
 700       _table->_log2_size <= log2_size) {
 701     unlock_resize_lock(thread);
 702     return false;
 703   }
 704   _new_table = new InternalTable(_table->_log2_size - 1);
 705   return true;
 706 }
 707 
 708 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 709 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 710   internal_shrink_epilog(Thread* thread)
 711 {
 712   assert(_resize_lock_owner == thread, "Re-size lock not held");
 713 
 714   InternalTable* old_table = set_table_from_new();
 715   _size_limit_reached = false;
 716   unlock_resize_lock(thread);
 717 #ifdef ASSERT
 718   for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
 720            "No poison found");
 721   }
 722 #endif
 723   // ABA safe, old_table not visible to any other threads.
 724   delete old_table;
 725 }
 726 
 727 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 728 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 729   internal_shrink_range(Thread* thread, size_t start, size_t stop)
 730 {
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
 733   for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
 734     size_t even_hash_index = bucket_it; // High bit 0
 735     size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1
 736 
 737     Bucket* b_old_even = _table->get_bucket(even_hash_index);
 738     Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);
 739 
 740     b_old_even->lock();
 741     b_old_odd->lock();
 742 
 743     _new_table->get_buckets()[bucket_it] = *b_old_even;
 744 
 745     // Put chains together.
 746     _new_table->get_bucket(bucket_it)->
 747       release_assign_last_node_next(*(b_old_odd->first_ptr()));
 748 
 749     b_old_even->redirect();
 750     b_old_odd->redirect();
 751 
 752     write_synchonize_on_visible_epoch(thread);
 753 
 754     // Unlock for writes into new smaller table.
 755     _new_table->get_bucket(bucket_it)->unlock();
 756 
 757     DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
 758                                                    (Node*)POISON_PTR);)
 759     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
 760                                                   (Node*)POISON_PTR);)
 761   }
 762 }
 763 
 764 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 765 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 766   internal_shrink(Thread* thread, size_t log2_size)
 767 {
 768   if (!internal_shrink_prolog(thread, log2_size)) {
 769     assert(_resize_lock_owner != thread, "Re-size lock held");
 770     return false;
 771   }
 772   assert(_resize_lock_owner == thread, "Should be locked by me");
 773   internal_shrink_range(thread, 0, _new_table->_size);
 774   internal_shrink_epilog(thread);
 775   assert(_resize_lock_owner != thread, "Re-size lock held");
 776   return true;
 777 }
 778 
 779 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 780 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 781   internal_grow_prolog(Thread* thread, size_t log2_size)
 782 {
  // We only do this double check of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means a high load on the table while shrink
  // means a low load.
 786   if (is_max_size_reached()) {
 787     return false;
 788   }
 789   if (!try_resize_lock(thread)) {
 790     // Either we have an ongoing resize or an operation which doesn't want us
 791     // to resize now.
 792     return false;
 793   }
 794   if (is_max_size_reached() || _table->_log2_size >= log2_size) {
 795     unlock_resize_lock(thread);
 796     return false;
 797   }
 798 
 799   _new_table = new InternalTable(_table->_log2_size + 1);
 800 
 801   if (_new_table->_log2_size == _log2_size_limit) {
 802     _size_limit_reached = true;
 803   }
 804 
 805   return true;
 806 }
 807 
 808 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 809 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 810   internal_grow_epilog(Thread* thread)
 811 {
 812   assert(_resize_lock_owner == thread, "Should be locked");
 813 
 814   InternalTable* old_table = set_table_from_new();
 815   unlock_resize_lock(thread);
 816 #ifdef ASSERT
 817   for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
 819            "No poison found");
 820   }
 821 #endif
 822   // ABA safe, old_table not visible to any other threads.
 823   delete old_table;
 824 }
 825 
 826 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 827 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 828   internal_grow(Thread* thread, size_t log2_size)
 829 {
 830   if (!internal_grow_prolog(thread, log2_size)) {
 831     assert(_resize_lock_owner != thread, "Re-size lock held");
 832     return false;
 833   }
 834   assert(_resize_lock_owner == thread, "Should be locked by me");
 835   internal_grow_range(thread, 0, _table->_size);
 836   internal_grow_epilog(thread);
 837   assert(_resize_lock_owner != thread, "Re-size lock held");
 838   return true;
 839 }
 840 
 841 // Always called within critical section
 842 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 843 template <typename LOOKUP_FUNC>
 844 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
 845   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
 846 {
 847   bool clean = false;
 848   size_t loops = 0;
 849   VALUE* ret = NULL;
 850 
 851   const Bucket* bucket = get_bucket(lookup_f.get_hash());
 852   Node* node = get_node(bucket, lookup_f, &clean, &loops);
 853   if (node != NULL) {
 854     ret = node->value();
 855   }
 856   if (grow_hint != NULL) {
 857     *grow_hint = loops > _grow_hint;
 858   }
 859 
 860   return ret;
 861 }
 862 
 863 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 864 template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
 865 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 866   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
 867                   CALLBACK_FUNC& callback, bool* grow_hint)
 868 {
 869   bool ret = false;
 870   bool clean = false;
 871   bool locked;
 872   size_t loops = 0;
 873   size_t i = 0;
 874   Node* new_node = NULL;
 875   uintx hash = lookup_f.get_hash();
 876   while (true) {
 877     {
 878       ScopedCS cs(thread, this); /* protected the table/bucket */
 879       Bucket* bucket = get_bucket(hash);
 880 
 881       Node* first_at_start = bucket->first();
 882       Node* old = get_node(bucket, lookup_f, &clean, &loops);
 883       if (old == NULL) {
 884         // No duplicate found.
 885         if (new_node == NULL) {
 886           new_node = Node::create_node(value_f(), first_at_start);
 887         } else {
 888           new_node->set_next(first_at_start);
 889         }
 890         if (bucket->cas_first(new_node, first_at_start)) {
 891           callback(true, new_node->value());
 892           new_node = NULL;
 893           ret = true;
 894           break; /* leave critical section */
 895         }
        // CAS failed; we must leave the critical section and retry.
 897         locked = bucket->is_locked();
 898       } else {
 899         // There is a duplicate.
 900         callback(false, old->value());
 901         break; /* leave critical section */
 902       }
 903     } /* leave critical section */
 904     i++;
 905     if (locked) {
 906       os::naked_yield();
 907     } else {
 908       SpinPause();
 909     }
 910   }
 911 
 912   if (new_node != NULL) {
    // CAS failed and a duplicate was inserted; we must free this node.
 914     Node::destroy_node(new_node);
 915   } else if (i == 0 && clean) {
 916     // We only do cleaning on fast inserts.
 917     Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 918     assert(bucket->is_locked(), "Must be locked.");
 919     delete_in_bucket(thread, bucket, lookup_f);
 920     bucket->unlock();
 921   }
 922 
 923   if (grow_hint != NULL) {
 924     *grow_hint = loops > _grow_hint;
 925   }
 926 
 927   return ret;
 928 }
 929 
 930 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 931 template <typename FUNC>
 932 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 933   visit_nodes(Bucket* bucket, FUNC& visitor_f)
 934 {
 935   Node* current_node = bucket->first();
 936   while (current_node != NULL) {
 937     if (!visitor_f(current_node->value())) {
 938       return false;
 939     }
 940     current_node = current_node->next();
 941   }
 942   return true;
 943 }
 944 
 945 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 946 template <typename FUNC>
 947 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 948   do_scan_locked(Thread* thread, FUNC& scan_f)
 949 {
 950   assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We could do a critical section over the entire loop, but that would block
  // updates for a long time. Instead we choose to block resizes.
 953   InternalTable* table = get_table();
 954   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
 955     ScopedCS cs(thread, this);
 956     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
 957       break; /* ends critical section */
 958     }
 959   } /* ends critical section */
 960 }
 961 
 962 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 963 template <typename EVALUATE_FUNC>
 964 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
 965   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
 966                      size_t num_del, Node** ndel)
 967 {
 968   size_t dels = 0;
 969   Node* const volatile * rem_n_prev = bucket->first_ptr();
 970   Node* rem_n = bucket->first();
 971   while (rem_n != NULL) {
 972     if (eval_f(rem_n->value())) {
 973       ndel[dels++] = rem_n;
 974       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 975       rem_n = rem_n->next();
 976       if (dels == num_del) {
 977         break;
 978       }
 979     } else {
 980       rem_n_prev = rem_n->next_ptr();
 981       rem_n = rem_n->next();
 982     }
 983   }
 984   return dels;
 985 }
 986 
 987 // Constructor
 988 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 989 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 990   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
 991     : _new_table(NULL), _log2_start_size(log2size),
 992        _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
 993        _size_limit_reached(false), _resize_lock_owner(NULL),
 994        _invisible_epoch(0)
 995 {
 996   _resize_lock =
 997     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
 998               Monitor::_safepoint_check_never);
 999   _table = new InternalTable(log2size);
1000   assert(log2size_limit >= log2size, "bad ergo");
1001   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1002 }
1003 
1004 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1005 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1006   ~ConcurrentHashTable()
1007 {
1008   delete _resize_lock;
1009   free_nodes();
1010   delete _table;
1011 }
1012 
1013 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1014 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
1015   get_size_log2(Thread* thread)
1016 {
1017   ScopedCS cs(thread, this);
1018   return _table->_log2_size;
1019 }
1020 
1021 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1022 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1023   shrink(Thread* thread, size_t size_limit_log2)
1024 {
1025   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
1026   bool ret = internal_shrink(thread, tmp);
1027   return ret;
1028 }
1029 
1030 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1031 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1032   grow(Thread* thread, size_t size_limit_log2)
1033 {
1034   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
1035   return internal_grow(thread, tmp);
1036 }
1037 
1038 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1039 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
1040 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1041   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
1042 {
1043   bool ret = false;
1044   ScopedCS cs(thread, this);
1045   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1046   if (val != NULL) {
1047     found_f(val);
1048     ret = true;
1049   }
1050   return ret;
1051 }
1052 
1053 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1054 template <typename LOOKUP_FUNC>
1055 inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
1056   get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
1057 {
1058   ScopedCS cs(thread, this);
1059   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1060   return val != NULL ? *val : CONFIG::notfound();
1061 }
1062 
1063 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1064 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1065   unsafe_insert(const VALUE& value) {
1066   bool dead_hash = false;
1067   size_t hash = CONFIG::get_hash(value, &dead_hash);
1068   if (dead_hash) {
1069     return false;
1070   }
1071   // This is an unsafe operation.
1072   InternalTable* table = get_table();
1073   Bucket* bucket = get_bucket_in(table, hash);
1074   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1075   Node* new_node = Node::create_node(value, bucket->first());
1076   if (!bucket->cas_first(new_node, bucket->first())) {
1077     assert(false, "bad");
1078   }
1079   return true;
1080 }
1081 
1082 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1083 template <typename SCAN_FUNC>
1084 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1085   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1086 {
1087   if (!try_resize_lock(thread)) {
1088     return false;
1089   }
1090   do_scan_locked(thread, scan_f);
1091   unlock_resize_lock(thread);
1092   return true;
1093 }
1094 
1095 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1096 template <typename SCAN_FUNC>
1097 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1098   do_scan(Thread* thread, SCAN_FUNC& scan_f)
1099 {
1100   assert(_resize_lock_owner != thread, "Re-size lock held");
1101   lock_resize_lock(thread);
1102   do_scan_locked(thread, scan_f);
1103   unlock_resize_lock(thread);
1104   assert(_resize_lock_owner != thread, "Re-size lock held");
1105 }
1106 
1107 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1108 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1109 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1110   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1111 {
1112   if (!try_resize_lock(thread)) {
1113     return false;
1114   }
1115   do_bulk_delete_locked(thread, eval_f, del_f);
1116   unlock_resize_lock(thread);
1117   assert(_resize_lock_owner != thread, "Re-size lock held");
1118   return true;
1119 }
1120 
1121 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1122 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1123 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1124   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1125 {
1126   lock_resize_lock(thread);
1127   do_bulk_delete_locked(thread, eval_f, del_f);
1128   unlock_resize_lock(thread);
1129 }
1130 
1131 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1132 template <typename VALUE_SIZE_FUNC>
1133 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1134   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1135                 outputStream* st, const char* table_name)
1136 {
1137   NumberSeq summary;
1138   size_t literal_bytes = 0;
1139   if (!try_resize_lock(thread)) {
1140     st->print_cr("statistics unavailable at this moment");
1141     return;
1142   }
1143 
1144   InternalTable* table = get_table();
1145   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1146     ScopedCS cs(thread, this);
1147     size_t count = 0;
1148     Bucket* bucket = table->get_bucket(bucket_it);
1149     if (bucket->have_redirect() || bucket->is_locked()) {
1150         continue;
1151     }
1152     Node* current_node = bucket->first();
1153     while (current_node != NULL) {
1154       ++count;
1155       literal_bytes += vs_f(current_node->value());
1156       current_node = current_node->next();
1157     }
1158     summary.add((double)count);
1159   }
1160 
1161   double num_buckets = summary.num();
1162   double num_entries = summary.sum();
1163 
1164   size_t bucket_bytes = num_buckets * sizeof(Bucket);
1165   size_t entry_bytes  = num_entries * sizeof(Node);
1166   size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
1167 
1168   size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
1169   size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
1170 
1171   st->print_cr("%s statistics:", table_name);
1172   st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
1173                " bytes, each " SIZE_FORMAT,
1174                (size_t)num_buckets, bucket_bytes,  bucket_size);
1175   st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
1176                " bytes, each " SIZE_FORMAT,
1177                (size_t)num_entries, entry_bytes,   entry_size);
1178   if (literal_bytes != 0) {
1179     double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1180     st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
1181                  " bytes, avg %7.3f",
1182                  (size_t)num_entries, literal_bytes, literal_avg);
1183   }
1184   st->print_cr("Total footprsize_t         : %9s = %9" PRIuPTR " bytes", ""
1185                , total_bytes);
1186   st->print_cr("Average bucket size     : %9.3f", summary.avg());
1187   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1188   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1189   st->print_cr("Maximum bucket size     : %9" PRIuPTR,
1190                (size_t)summary.maximum());
1191   unlock_resize_lock(thread);
1192 }
1193 
1194 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1195 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1196   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1197 {
1198   if (!try_resize_lock(thread)) {
1199     return false;
1200   }
1201   assert(_new_table == NULL, "Must be NULL");
1202   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1203     Bucket* bucket = _table->get_bucket(bucket_it);
1204     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1205     while (bucket->first() != NULL) {
1206       Node* move_node = bucket->first();
1207       bool ok = bucket->cas_first(move_node->next(), move_node);
1208       assert(ok, "Uncontended cas must work");
1209       bool dead_hash = false;
1210       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1211       if (!dead_hash) {
1212         Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!insert_bucket->have_redirect() && !insert_bucket->is_locked(),
               "No bits should be present");
1214         move_node->set_next(insert_bucket->first());
1215         ok = insert_bucket->cas_first(move_node, insert_bucket->first());
1216         assert(ok, "Uncontended cas must work");
1217       }
1218     }
1219   }
1220   unlock_resize_lock(thread);
1221   return true;
1222 }
1223 
1224 #endif // include guard