1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
  26 #define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
  27 
  28 #include "memory/allocation.inline.hpp"
  29 #include "runtime/atomic.hpp"
  30 #include "runtime/orderAccess.hpp"
  31 #include "runtime/mutex.inline.hpp"
  32 #include "runtime/prefetch.inline.hpp"
  33 #include "utilities/concurrentHashTable.hpp"
  34 #include "utilities/globalCounter.inline.hpp"
  35 #include "utilities/numberSeq.hpp"
  36 #include "utilities/spinYield.hpp"
  37 
  38 // 2^30 = 1G buckets
  39 #define SIZE_BIG_LOG2 30
  40 // 2^5  = 32 buckets
  41 #define SIZE_SMALL_LOG2 5
  42 
  43 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
  44 #define SPINPAUSES_PER_YIELD 8192
  45 
  46 #ifdef ASSERT
  47 #ifdef _LP64
  48 // Two low bits are not usable.
  49 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
  50 #else
  51 // Two low bits are not usable.
  52 static const void* POISON_PTR = (void*)0xffbadbac;
  53 #endif
  54 #endif
  55 
  56 // Node
  57 template <typename CONFIG, MEMFLAGS F>
  58 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  59 ConcurrentHashTable<CONFIG, F>::
  60   Node::next() const
  61 {
  62   return OrderAccess::load_acquire(&_next);
  63 }
  64 
  65 // Bucket
  66 template <typename CONFIG, MEMFLAGS F>
  67 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  68 ConcurrentHashTable<CONFIG, F>::
  69   Bucket::first_raw() const
  70 {
  71   return OrderAccess::load_acquire(&_first);
  72 }
  73 
  74 template <typename CONFIG, MEMFLAGS F>
  75 inline void ConcurrentHashTable<CONFIG, F>::
  76   Bucket::release_assign_node_ptr(
  77     typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
  78     typename ConcurrentHashTable<CONFIG, F>::Node* node) const
  79 {
  // Due to this assert, this method is not static.
  81   assert(is_locked(), "Must be locked.");
  82   Node** tmp = (Node**)dst;
  83   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
  84 }
  85 
  86 template <typename CONFIG, MEMFLAGS F>
  87 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  88 ConcurrentHashTable<CONFIG, F>::
  89   Bucket::first() const
  90 {
  // We strip the state bits before returning the ptr.
  92   return clear_state(OrderAccess::load_acquire(&_first));
  93 }
  94 
  95 template <typename CONFIG, MEMFLAGS F>
  96 inline bool ConcurrentHashTable<CONFIG, F>::
  97   Bucket::have_redirect() const
  98 {
  99   return is_state(first_raw(), STATE_REDIRECT_BIT);
 100 }
 101 
 102 template <typename CONFIG, MEMFLAGS F>
 103 inline bool ConcurrentHashTable<CONFIG, F>::
 104   Bucket::is_locked() const
 105 {
 106   return is_state(first_raw(), STATE_LOCK_BIT);
 107 }
 108 
 109 template <typename CONFIG, MEMFLAGS F>
 110 inline void ConcurrentHashTable<CONFIG, F>::
 111   Bucket::lock()
 112 {
 113   int i = 0;
 114   // SpinYield would be unfair here
 115   while (!this->trylock()) {
 116     if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
 119       os::naked_yield();
 120       i = 0;
 121     } else {
 122       SpinPause();
 123     }
 124   }
 125 }
 126 
 127 template <typename CONFIG, MEMFLAGS F>
 128 inline void ConcurrentHashTable<CONFIG, F>::
 129   Bucket::release_assign_last_node_next(
 130      typename ConcurrentHashTable<CONFIG, F>::Node* node)
 131 {
 132   assert(is_locked(), "Must be locked.");
 133   Node* const volatile * ret = first_ptr();
 134   while (clear_state(*ret) != NULL) {
 135     ret = clear_state(*ret)->next_ptr();
 136   }
 137   release_assign_node_ptr(ret, node);
 138 }
 139 
 140 template <typename CONFIG, MEMFLAGS F>
 141 inline bool ConcurrentHashTable<CONFIG, F>::
 142   Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
 143                     typename ConcurrentHashTable<CONFIG, F>::Node* expect
 144                     )
 145 {
 146   if (is_locked()) {
 147     return false;
 148   }
 149   if (Atomic::cmpxchg(node, &_first, expect) == expect) {
 150     return true;
 151   }
 152   return false;
 153 }
 154 
 155 template <typename CONFIG, MEMFLAGS F>
 156 inline bool ConcurrentHashTable<CONFIG, F>::
 157   Bucket::trylock()
 158 {
 159   if (is_locked()) {
 160     return false;
 161   }
 162   // We will expect a clean first pointer.
 163   Node* tmp = first();
 164   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
 165     return true;
 166   }
 167   return false;
 168 }
 169 
 170 template <typename CONFIG, MEMFLAGS F>
 171 inline void ConcurrentHashTable<CONFIG, F>::
 172   Bucket::unlock()
 173 {
 174   assert(is_locked(), "Must be locked.");
 175   assert(!have_redirect(),
 176          "Unlocking a bucket after it has reached terminal state.");
 177   OrderAccess::release_store(&_first, clear_state(first()));
 178 }
 179 
 180 template <typename CONFIG, MEMFLAGS F>
 181 inline void ConcurrentHashTable<CONFIG, F>::
 182   Bucket::redirect()
 183 {
 184   assert(is_locked(), "Must be locked.");
 185   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 186 }
 187 
 188 // InternalTable
 189 template <typename CONFIG, MEMFLAGS F>
 190 inline ConcurrentHashTable<CONFIG, F>::
 191   InternalTable::InternalTable(size_t log2_size)
 192     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
 193       _hash_mask(~(~((size_t)0) << _log2_size))
 194 {
 195   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
 196          "Bad size");
 197   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
 198   // Use placement new for each element instead of new[] which could use more
 199   // memory than allocated.
 200   for (size_t i = 0; i < _size; ++i) {
 201     new (_buckets + i) Bucket();
 202   }
 203 }
 204 
 205 template <typename CONFIG, MEMFLAGS F>
 206 inline ConcurrentHashTable<CONFIG, F>::
 207   InternalTable::~InternalTable()
 208 {
 209   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 210 }
 211 
 212 // ScopedCS
 213 template <typename CONFIG, MEMFLAGS F>
 214 inline ConcurrentHashTable<CONFIG, F>::
 215   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
 216     : _thread(thread),
 217       _cht(cht),
 218       _cs_context(GlobalCounter::critical_section_begin(_thread))
 219 {
  // The current version is now published: zero _invisible_epoch so a
  // concurrent bulk operation knows this version has been seen by a reader.
 221   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
 222     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
 223   }
 224 }
 225 
 226 template <typename CONFIG, MEMFLAGS F>
 227 inline ConcurrentHashTable<CONFIG, F>::
 228   ScopedCS::~ScopedCS()
 229 {
 230   GlobalCounter::critical_section_end(_thread, _cs_context);
 231 }
 232 
 233 template <typename CONFIG, MEMFLAGS F>
 234 template <typename LOOKUP_FUNC>
 235 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
 236   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 237 {
 238   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 239 }
 240 
 241 // HaveDeletables
 242 template <typename CONFIG, MEMFLAGS F>
 243 template <typename EVALUATE_FUNC>
 244 inline bool ConcurrentHashTable<CONFIG, F>::
 245   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 246                                                       EVALUATE_FUNC& eval_f,
 247                                                       Bucket* prefetch_bucket)
 248 {
  // Instantiated for pointer types (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives roughly a 30% speed-up.
 251   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
 252   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 253     if (pref != NULL) {
 254       Prefetch::read(*pref->value(), 0);
 255       pref = pref->next();
 256     }
 257     // Read next() Node* once.  May be racing with a thread moving the next
 258     // pointers.
 259     Node* next_pref = next->next();
 260     if (next_pref != NULL) {
 261       Prefetch::read(*next_pref->value(), 0);
 262     }
 263     if (eval_f(next->value())) {
 264       return true;
 265     }
 266   }
 267   return false;
 268 }
 269 
 270 template <typename CONFIG, MEMFLAGS F>
 271 template <bool b, typename EVALUATE_FUNC>
 272 inline bool ConcurrentHashTable<CONFIG, F>::
 273   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 274                                                    EVALUATE_FUNC& eval_f,
 275                                                    Bucket* preb)
 276 {
 277   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 278     if (eval_f(next->value())) {
 279       return true;
 280     }
 281   }
 282   return false;
 283 }
 284 
 285 // ConcurrentHashTable
 286 template <typename CONFIG, MEMFLAGS F>
 287 inline void ConcurrentHashTable<CONFIG, F>::
 288   write_synchonize_on_visible_epoch(Thread* thread)
 289 {
 290   assert(_resize_lock_owner == thread, "Re-size lock not held");
 291   OrderAccess::fence(); // Prevent below load from floating up.
 292   // If no reader saw this version we can skip write_synchronize.
 293   if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
 294     return;
 295   }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark the version we are synchronizing on (this/next) as not published.
  // A reader will zero this flag if it reads this/next version.
 299   OrderAccess::release_store(&_invisible_epoch, thread);
 300   GlobalCounter::write_synchronize();
 301 }
 302 
 303 template <typename CONFIG, MEMFLAGS F>
 304 inline bool ConcurrentHashTable<CONFIG, F>::
 305   try_resize_lock(Thread* locker)
 306 {
 307   if (_resize_lock->try_lock()) {
 308     if (_resize_lock_owner != NULL) {
 309       assert(locker != _resize_lock_owner, "Already own lock");
 310       // We got mutex but internal state is locked.
 311       _resize_lock->unlock();
 312       return false;
 313     }
 314   } else {
 315     return false;
 316   }
 317   _invisible_epoch = 0;
 318   _resize_lock_owner = locker;
 319   return true;
 320 }
 321 
 322 template <typename CONFIG, MEMFLAGS F>
 323 inline void ConcurrentHashTable<CONFIG, F>::
 324   lock_resize_lock(Thread* locker)
 325 {
 326   size_t i = 0;
  // If the lock is held by some other thread, the chances of it being
  // released quickly are low, so we prefer yielding.
 329   SpinYield yield(1, 512);
 330   do {
 331     _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked while _resize_lock_owner still contains the owner.
 334     if (_resize_lock_owner != NULL) {
 335       assert(locker != _resize_lock_owner, "Already own lock");
 336       // We got mutex but internal state is locked.
 337       _resize_lock->unlock();
 338       yield.wait();
 339     } else {
 340       break;
 341     }
 342   } while(true);
 343   _resize_lock_owner = locker;
 344   _invisible_epoch = 0;
 345 }
 346 
 347 template <typename CONFIG, MEMFLAGS F>
 348 inline void ConcurrentHashTable<CONFIG, F>::
 349   unlock_resize_lock(Thread* locker)
 350 {
 351   _invisible_epoch = 0;
 352   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
 353   _resize_lock_owner = NULL;
 354   _resize_lock->unlock();
 355 }
 356 
 357 template <typename CONFIG, MEMFLAGS F>
 358 inline void ConcurrentHashTable<CONFIG, F>::
 359   free_nodes()
 360 {
 361   // We assume we are not MT during freeing.
 362   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
 363     Bucket* bucket = _table->get_buckets() + node_it;
 364     Node* node = bucket->first();
 365     while (node != NULL) {
 366       Node* free_node = node;
 367       node = node->next();
 368       Node::destroy_node(free_node);
 369     }
 370   }
 371 }
 372 
 373 template <typename CONFIG, MEMFLAGS F>
 374 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 375 ConcurrentHashTable<CONFIG, F>::
 376   get_table() const
 377 {
 378   return OrderAccess::load_acquire(&_table);
 379 }
 380 
 381 template <typename CONFIG, MEMFLAGS F>
 382 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 383 ConcurrentHashTable<CONFIG, F>::
 384   get_new_table() const
 385 {
 386   return OrderAccess::load_acquire(&_new_table);
 387 }
 388 
 389 template <typename CONFIG, MEMFLAGS F>
 390 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 391 ConcurrentHashTable<CONFIG, F>::
 392   set_table_from_new()
 393 {
 394   InternalTable* old_table = _table;
 395   // Publish the new table.
 396   OrderAccess::release_store(&_table, _new_table);
 397   // All must see this.
 398   GlobalCounter::write_synchronize();
 399   // _new_table not read any more.
 400   _new_table = NULL;
 401   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
 402   return old_table;
 403 }
 404 
 405 template <typename CONFIG, MEMFLAGS F>
 406 inline void ConcurrentHashTable<CONFIG, F>::
 407   internal_grow_range(Thread* thread, size_t start, size_t stop)
 408 {
 409   assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The bucket state is also copied here, hence all buckets in the new table
  // will be locked. We call the sibling buckets even/odd, where the even one
  // has the new high index bit 0 and the odd one has it 1.
 414   for (size_t even_index = start; even_index < stop; even_index++) {
 415     Bucket* bucket = _table->get_bucket(even_index);
 416 
 417     bucket->lock();
 418 
 419     size_t odd_index = even_index + _table->_size;
 420     _new_table->get_buckets()[even_index] = *bucket;
 421     _new_table->get_buckets()[odd_index] = *bucket;
 422 
    // Make lockers go to the new table, where they will wait until the unlock() below.
 424     bucket->redirect(); /* Must release stores above */
 425 
 426     // When this is done we have separated the nodes into corresponding buckets
 427     // in new table.
 428     if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
 429       // If bucket is empty, unzip does nothing.
 430       // We must make sure readers go to new table before we poison the bucket.
 431       DEBUG_ONLY(GlobalCounter::write_synchronize();)
 432     }
 433 
 434     // Unlock for writes into the new table buckets.
 435     _new_table->get_bucket(even_index)->unlock();
 436     _new_table->get_bucket(odd_index)->unlock();
 437 
 438     DEBUG_ONLY(
 439        bucket->release_assign_node_ptr(
 440           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
 441     )
 442   }
 443 }
 444 
 445 template <typename CONFIG, MEMFLAGS F>
 446 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
 447 inline bool ConcurrentHashTable<CONFIG, F>::
 448   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
 449 {
 450   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 451   assert(bucket->is_locked(), "Must be locked.");
 452   Node* const volatile * rem_n_prev = bucket->first_ptr();
 453   Node* rem_n = bucket->first();
 454   bool have_dead = false;
 455   while (rem_n != NULL) {
 456     if (lookup_f.equals(rem_n->value(), &have_dead)) {
 457       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 458       break;
 459     } else {
 460       rem_n_prev = rem_n->next_ptr();
 461       rem_n = rem_n->next();
 462     }
 463   }
 464 
 465   bucket->unlock();
 466 
 467   if (rem_n == NULL) {
 468     return false;
 469   }
 470   // Publish the deletion.
 471   GlobalCounter::write_synchronize();
 472   delete_f(rem_n->value());
 473   Node::destroy_node(rem_n);
 474   JFR_ONLY(_stats_rate.remove();)
 475   return true;
 476 }
 477 
 478 template <typename CONFIG, MEMFLAGS F>
 479 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 480 inline void ConcurrentHashTable<CONFIG, F>::
 481   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
 482                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 483 {
  // Here we hold the resize lock, so the table is SMR safe and there is no
  // new table. This can be done in parallel if we want.
 486   assert((is_mt && _resize_lock_owner != NULL) ||
 487          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
 488   Node* ndel[BULK_DELETE_LIMIT];
 489   InternalTable* table = get_table();
 490   assert(start_idx < stop_idx, "Must be");
 491   assert(stop_idx <= _table->_size, "Must be");
  // Here we handle the critical section manually, since we don't want to take
  // the cost of locking the bucket if there is nothing to delete. But we can
  // have concurrent single deletes. The _invisible_epoch can only be used by
  // the owner of the _resize_lock, i.e. us here, so we must not change it on
  // our own read-side.
 497   GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread);
 498   for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
 499     Bucket* bucket = table->get_bucket(bucket_it);
 500     Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
 501                               table->get_bucket(bucket_it+1) : NULL;
 502 
 503     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
 504         have_deletable(bucket, eval_f, prefetch_bucket)) {
 505         // Nothing to remove in this bucket.
 506         continue;
 507     }
 508 
 509     GlobalCounter::critical_section_end(thread, cs_context);
 510     // We left critical section but the bucket cannot be removed while we hold
 511     // the _resize_lock.
 512     bucket->lock();
 513     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
 514     bucket->unlock();
 515     if (is_mt) {
 516       GlobalCounter::write_synchronize();
 517     } else {
 518       write_synchonize_on_visible_epoch(thread);
 519     }
 520     for (size_t node_it = 0; node_it < nd; node_it++) {
 521       del_f(ndel[node_it]->value());
 522       Node::destroy_node(ndel[node_it]);
 523       JFR_ONLY(_stats_rate.remove();)
 524       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 525     }
 526     cs_context = GlobalCounter::critical_section_begin(thread);
 527   }
 528   GlobalCounter::critical_section_end(thread, cs_context);
 529 }
 530 
 531 template <typename CONFIG, MEMFLAGS F>
 532 template <typename LOOKUP_FUNC>
 533 inline void ConcurrentHashTable<CONFIG, F>::
 534   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 535 {
 536   assert(bucket->is_locked(), "Must be locked.");
 537 
 538   size_t dels = 0;
 539   Node* ndel[BULK_DELETE_LIMIT];
 540   Node* const volatile * rem_n_prev = bucket->first_ptr();
 541   Node* rem_n = bucket->first();
 542   while (rem_n != NULL) {
 543     bool is_dead = false;
 544     lookup_f.equals(rem_n->value(), &is_dead);
 545     if (is_dead) {
 546       ndel[dels++] = rem_n;
 547       Node* next_node = rem_n->next();
 548       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 549       rem_n = next_node;
 550       if (dels == BULK_DELETE_LIMIT) {
 551         break;
 552       }
 553     } else {
 554       rem_n_prev = rem_n->next_ptr();
 555       rem_n = rem_n->next();
 556     }
 557   }
 558   if (dels > 0) {
 559     GlobalCounter::write_synchronize();
 560     for (size_t node_it = 0; node_it < dels; node_it++) {
 561       Node::destroy_node(ndel[node_it]);
 562       JFR_ONLY(_stats_rate.remove();)
 563       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 564     }
 565   }
 566 }
 567 
 568 template <typename CONFIG, MEMFLAGS F>
 569 inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
 570 ConcurrentHashTable<CONFIG, F>::
 571   get_bucket(uintx hash) const
 572 {
 573   InternalTable* table = get_table();
 574   Bucket* bucket = get_bucket_in(table, hash);
 575   if (bucket->have_redirect()) {
 576     table = get_new_table();
 577     bucket = get_bucket_in(table, hash);
 578   }
 579   return bucket;
 580 }
 581 
 582 template <typename CONFIG, MEMFLAGS F>
 583 inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
 584 ConcurrentHashTable<CONFIG, F>::
 585   get_bucket_locked(Thread* thread, const uintx hash)
 586 {
 587   Bucket* bucket;
 588   int i = 0;
 589   // SpinYield would be unfair here
 590   while(true) {
 591     {
      // We need a critical section to protect the table itself. But if we fail
      // to take the bucket lock we must leave the critical section, otherwise
      // we would deadlock.
 594       ScopedCS cs(thread, this);
 595       bucket = get_bucket(hash);
 596       if (bucket->trylock()) {
 597         break; /* ends critical section */
 598       }
 599     } /* ends critical section */
 600     if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
 603       os::naked_yield();
 604       i = 0;
 605     } else {
 606       SpinPause();
 607     }
 608   }
 609   return bucket;
 610 }
 611 
 612 // Always called within critical section
 613 template <typename CONFIG, MEMFLAGS F>
 614 template <typename LOOKUP_FUNC>
 615 typename ConcurrentHashTable<CONFIG, F>::Node*
 616 ConcurrentHashTable<CONFIG, F>::
 617   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
 618            bool* have_dead, size_t* loops) const
 619 {
 620   size_t loop_count = 0;
 621   Node* node = bucket->first();
 622   while (node != NULL) {
 623     bool is_dead = false;
 624     ++loop_count;
 625     if (lookup_f.equals(node->value(), &is_dead)) {
 626       break;
 627     }
 628     if (is_dead && !(*have_dead)) {
 629       *have_dead = true;
 630     }
 631     node = node->next();
 632   }
 633   if (loops != NULL) {
 634     *loops = loop_count;
 635   }
 636   return node;
 637 }
 638 
 639 template <typename CONFIG, MEMFLAGS F>
 640 inline bool ConcurrentHashTable<CONFIG, F>::
 641   unzip_bucket(Thread* thread, InternalTable* old_table,
 642                InternalTable* new_table, size_t even_index, size_t odd_index)
 643 {
 644   Node* aux = old_table->get_bucket(even_index)->first();
 645   if (aux == NULL) {
    // This is an empty bucket, and in debug builds we poison the first ptr in
    // the bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synchronize here, the caller must do it.
 649     return false;
 650   }
 651   Node* delete_me = NULL;
 652   Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
 653   Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
 654   while (aux != NULL) {
 655     bool dead_hash = false;
 656     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
 657     Node* aux_next = aux->next();
 658     if (dead_hash) {
 659       delete_me = aux;
      // This item is dead, move both lists to its next
 661       new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 662                                                                 aux_next);
 663       new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 664                                                                  aux_next);
 665     } else {
 666       size_t aux_index = bucket_idx_hash(new_table, aux_hash);
 667       if (aux_index == even_index) {
        // This node is even, so move the odd chain past it to aux's next
 669         new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 670                                                                   aux_next);
 671         // Keep in even list
 672         even = aux->next_ptr();
 673       } else if (aux_index == odd_index) {
        // This node is odd, so move the even chain past it to aux's next
 675         new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 676                                                                    aux_next);
 677         // Keep in odd list
 678         odd = aux->next_ptr();
 679       } else {
 680         fatal("aux_index does not match even or odd indices");
 681       }
 682     }
 683     aux = aux_next;
 684 
    // We can only move one pointer at a time, otherwise a reader might be
    // moved to the wrong chain, e.g. a reader looking for an even hash value
    // could get moved to the odd bucket chain.
 688     write_synchonize_on_visible_epoch(thread);
 689     if (delete_me != NULL) {
 690       Node::destroy_node(delete_me);
 691       delete_me = NULL;
 692     }
 693   }
 694   return true;
 695 }
 696 
 697 template <typename CONFIG, MEMFLAGS F>
 698 inline bool ConcurrentHashTable<CONFIG, F>::
 699   internal_shrink_prolog(Thread* thread, size_t log2_size)
 700 {
 701   if (!try_resize_lock(thread)) {
 702     return false;
 703   }
 704   assert(_resize_lock_owner == thread, "Re-size lock not held");
 705   if (_table->_log2_size == _log2_start_size ||
 706       _table->_log2_size <= log2_size) {
 707     unlock_resize_lock(thread);
 708     return false;
 709   }
 710   _new_table = new InternalTable(_table->_log2_size - 1);
 711   return true;
 712 }
 713 
 714 template <typename CONFIG, MEMFLAGS F>
 715 inline void ConcurrentHashTable<CONFIG, F>::
 716   internal_shrink_epilog(Thread* thread)
 717 {
 718   assert(_resize_lock_owner == thread, "Re-size lock not held");
 719 
 720   InternalTable* old_table = set_table_from_new();
 721   _size_limit_reached = false;
 722   unlock_resize_lock(thread);
 723 #ifdef ASSERT
 724   for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
 726            "No poison found");
 727   }
 728 #endif
 729   // ABA safe, old_table not visible to any other threads.
 730   delete old_table;
 731 }
 732 
 733 template <typename CONFIG, MEMFLAGS F>
 734 inline void ConcurrentHashTable<CONFIG, F>::
 735   internal_shrink_range(Thread* thread, size_t start, size_t stop)
 736 {
  // The bucket state is also copied here, hence all buckets in the new table
  // will be locked.
 739   for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
 740     size_t even_hash_index = bucket_it; // High bit 0
 741     size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1
 742 
 743     Bucket* b_old_even = _table->get_bucket(even_hash_index);
 744     Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);
 745 
 746     b_old_even->lock();
 747     b_old_odd->lock();
 748 
 749     _new_table->get_buckets()[bucket_it] = *b_old_even;
 750 
 751     // Put chains together.
 752     _new_table->get_bucket(bucket_it)->
 753       release_assign_last_node_next(*(b_old_odd->first_ptr()));
 754 
 755     b_old_even->redirect();
 756     b_old_odd->redirect();
 757 
 758     write_synchonize_on_visible_epoch(thread);
 759 
 760     // Unlock for writes into new smaller table.
 761     _new_table->get_bucket(bucket_it)->unlock();
 762 
 763     DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
 764                                                    (Node*)POISON_PTR);)
 765     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
 766                                                   (Node*)POISON_PTR);)
 767   }
 768 }
 769 
 770 template <typename CONFIG, MEMFLAGS F>
 771 inline bool ConcurrentHashTable<CONFIG, F>::
 772   internal_shrink(Thread* thread, size_t log2_size)
 773 {
 774   if (!internal_shrink_prolog(thread, log2_size)) {
 775     assert(_resize_lock_owner != thread, "Re-size lock held");
 776     return false;
 777   }
 778   assert(_resize_lock_owner == thread, "Should be locked by me");
 779   internal_shrink_range(thread, 0, _new_table->_size);
 780   internal_shrink_epilog(thread);
 781   assert(_resize_lock_owner != thread, "Re-size lock held");
 782   return true;
 783 }
 784 
 785 template <typename CONFIG, MEMFLAGS F>
 786 inline bool ConcurrentHashTable<CONFIG, F>::
 787   internal_grow_prolog(Thread* thread, size_t log2_size)
 788 {
  // We only do this double-checking of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means high load on the table while shrink
  // means low load.
 792   if (is_max_size_reached()) {
 793     return false;
 794   }
 795   if (!try_resize_lock(thread)) {
 796     // Either we have an ongoing resize or an operation which doesn't want us
 797     // to resize now.
 798     return false;
 799   }
 800   if (is_max_size_reached() || _table->_log2_size >= log2_size) {
 801     unlock_resize_lock(thread);
 802     return false;
 803   }
 804 
 805   _new_table = new InternalTable(_table->_log2_size + 1);
 806 
 807   if (_new_table->_log2_size == _log2_size_limit) {
 808     _size_limit_reached = true;
 809   }
 810 
 811   return true;
 812 }
 813 
 814 template <typename CONFIG, MEMFLAGS F>
 815 inline void ConcurrentHashTable<CONFIG, F>::
 816   internal_grow_epilog(Thread* thread)
 817 {
 818   assert(_resize_lock_owner == thread, "Should be locked");
 819 
 820   InternalTable* old_table = set_table_from_new();
 821   unlock_resize_lock(thread);
 822 #ifdef ASSERT
 823   for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
 825            "No poison found");
 826   }
 827 #endif
 828   // ABA safe, old_table not visible to any other threads.
 829   delete old_table;
 830 }
 831 
 832 template <typename CONFIG, MEMFLAGS F>
 833 inline bool ConcurrentHashTable<CONFIG, F>::
 834   internal_grow(Thread* thread, size_t log2_size)
 835 {
 836   if (!internal_grow_prolog(thread, log2_size)) {
 837     assert(_resize_lock_owner != thread, "Re-size lock held");
 838     return false;
 839   }
 840   assert(_resize_lock_owner == thread, "Should be locked by me");
 841   internal_grow_range(thread, 0, _table->_size);
 842   internal_grow_epilog(thread);
 843   assert(_resize_lock_owner != thread, "Re-size lock held");
 844   return true;
 845 }
 846 
 847 // Always called within critical section
 848 template <typename CONFIG, MEMFLAGS F>
 849 template <typename LOOKUP_FUNC>
 850 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
 851   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
 852 {
 853   bool clean = false;
 854   size_t loops = 0;
 855   VALUE* ret = NULL;
 856 
 857   const Bucket* bucket = get_bucket(lookup_f.get_hash());
 858   Node* node = get_node(bucket, lookup_f, &clean, &loops);
 859   if (node != NULL) {
 860     ret = node->value();
 861   }
 862   if (grow_hint != NULL) {
 863     *grow_hint = loops > _grow_hint;
 864   }
 865 
 866   return ret;
 867 }
 868 
 869 template <typename CONFIG, MEMFLAGS F>
 870 template <typename LOOKUP_FUNC>
 871 inline bool ConcurrentHashTable<CONFIG, F>::
 872   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
 873                   bool* grow_hint, bool* clean_hint)
 874 {
 875   bool ret = false;
 876   bool clean = false;
 877   bool locked;
 878   size_t loops = 0;
 879   size_t i = 0;
 880   uintx hash = lookup_f.get_hash();
 881   Node* new_node = Node::create_node(value, NULL);
 882 
 883   while (true) {
 884     {
      ScopedCS cs(thread, this); /* protects the table/bucket */
 886       Bucket* bucket = get_bucket(hash);
 887       Node* first_at_start = bucket->first();
 888       Node* old = get_node(bucket, lookup_f, &clean, &loops);
 889       if (old == NULL) {
 890         new_node->set_next(first_at_start);
 891         if (bucket->cas_first(new_node, first_at_start)) {
 892           JFR_ONLY(_stats_rate.add();)
 893           new_node = NULL;
 894           ret = true;
 895           break; /* leave critical section */
 896         }
        // CAS failed; we must leave the critical section and retry.
 898         locked = bucket->is_locked();
 899       } else {
 900         // There is a duplicate.
 901         break; /* leave critical section */
 902       }
 903     } /* leave critical section */
 904     i++;
 905     if (locked) {
 906       os::naked_yield();
 907     } else {
 908       SpinPause();
 909     }
 910   }
 911 
 912   if (new_node != NULL) {
    // A duplicate was found (possibly inserted by a competing thread after our
    // CAS failed), so our node was never linked in; we must free it.
 914     Node::destroy_node(new_node);
 915   } else if (i == 0 && clean) {
 916     // We only do cleaning on fast inserts.
 917     Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 918     delete_in_bucket(thread, bucket, lookup_f);
 919     bucket->unlock();
 920     clean = false;
 921   }
 922 
 923   if (grow_hint != NULL) {
 924     *grow_hint = loops > _grow_hint;
 925   }
 926 
 927   if (clean_hint != NULL) {
 928     *clean_hint = clean;
 929   }
 930 
 931   return ret;
 932 }
 933 
 934 template <typename CONFIG, MEMFLAGS F>
 935 template <typename FUNC>
 936 inline bool ConcurrentHashTable<CONFIG, F>::
 937   visit_nodes(Bucket* bucket, FUNC& visitor_f)
 938 {
 939   Node* current_node = bucket->first();
 940   while (current_node != NULL) {
 941     if (!visitor_f(current_node->value())) {
 942       return false;
 943     }
 944     current_node = current_node->next();
 945   }
 946   return true;
 947 }
 948 
 949 template <typename CONFIG, MEMFLAGS F>
 950 template <typename FUNC>
 951 inline void ConcurrentHashTable<CONFIG, F>::
 952   do_scan_locked(Thread* thread, FUNC& scan_f)
 953 {
 954   assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We could do a critical section over the entire loop, but that would block
  // updates for a long time. Instead we choose to block resizes.
 957   InternalTable* table = get_table();
 958   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
 959     ScopedCS cs(thread, this);
 960     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
 961       break; /* ends critical section */
 962     }
 963   } /* ends critical section */
 964 }
 965 
 966 template <typename CONFIG, MEMFLAGS F>
 967 template <typename EVALUATE_FUNC>
 968 inline size_t ConcurrentHashTable<CONFIG, F>::
 969   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
 970                      size_t num_del, Node** ndel)
 971 {
 972   size_t dels = 0;
 973   Node* const volatile * rem_n_prev = bucket->first_ptr();
 974   Node* rem_n = bucket->first();
 975   while (rem_n != NULL) {
 976     if (eval_f(rem_n->value())) {
 977       ndel[dels++] = rem_n;
 978       Node* next_node = rem_n->next();
 979       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 980       rem_n = next_node;
 981       if (dels == num_del) {
 982         break;
 983       }
 984     } else {
 985       rem_n_prev = rem_n->next_ptr();
 986       rem_n = rem_n->next();
 987     }
 988   }
 989   return dels;
 990 }
 991 
 992 // Constructor
 993 template <typename CONFIG, MEMFLAGS F>
 994 inline ConcurrentHashTable<CONFIG, F>::
 995   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
 996     : _new_table(NULL), _log2_size_limit(log2size_limit),
 997        _log2_start_size(log2size), _grow_hint(grow_hint),
 998        _size_limit_reached(false), _resize_lock_owner(NULL),
 999        _invisible_epoch(0)
1000 {
1001   _stats_rate = TableRateStatistics();
1002   _resize_lock =
1003     new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
1004               Mutex::_safepoint_check_never);
1005   _table = new InternalTable(log2size);
1006   assert(log2size_limit >= log2size, "bad ergo");
1007   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1008 }
1009 
1010 template <typename CONFIG, MEMFLAGS F>
1011 inline ConcurrentHashTable<CONFIG, F>::
1012   ~ConcurrentHashTable()
1013 {
1014   delete _resize_lock;
1015   free_nodes();
1016   delete _table;
1017 }
1018 
1019 template <typename CONFIG, MEMFLAGS F>
1020 inline size_t ConcurrentHashTable<CONFIG, F>::
1021   get_size_log2(Thread* thread)
1022 {
1023   ScopedCS cs(thread, this);
1024   return _table->_log2_size;
1025 }
1026 
1027 template <typename CONFIG, MEMFLAGS F>
1028 inline bool ConcurrentHashTable<CONFIG, F>::
1029   shrink(Thread* thread, size_t size_limit_log2)
1030 {
1031   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
1032   bool ret = internal_shrink(thread, tmp);
1033   return ret;
1034 }
1035 
1036 template <typename CONFIG, MEMFLAGS F>
1037 inline bool ConcurrentHashTable<CONFIG, F>::
1038   grow(Thread* thread, size_t size_limit_log2)
1039 {
1040   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
1041   return internal_grow(thread, tmp);
1042 }
1043 
1044 template <typename CONFIG, MEMFLAGS F>
1045 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
1046 inline bool ConcurrentHashTable<CONFIG, F>::
1047   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
1048 {
1049   bool ret = false;
1050   ScopedCS cs(thread, this);
1051   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1052   if (val != NULL) {
1053     found_f(val);
1054     ret = true;
1055   }
1056   return ret;
1057 }
1058 
1059 template <typename CONFIG, MEMFLAGS F>
1060 inline bool ConcurrentHashTable<CONFIG, F>::
1061   unsafe_insert(const VALUE& value) {
1062   bool dead_hash = false;
1063   size_t hash = CONFIG::get_hash(value, &dead_hash);
1064   if (dead_hash) {
1065     return false;
1066   }
1067   // This is an unsafe operation.
1068   InternalTable* table = get_table();
1069   Bucket* bucket = get_bucket_in(table, hash);
1070   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1071   Node* new_node = Node::create_node(value, bucket->first());
1072   if (!bucket->cas_first(new_node, bucket->first())) {
1073     assert(false, "bad");
1074   }
1075   JFR_ONLY(_stats_rate.add();)
1076   return true;
1077 }
1078 
1079 template <typename CONFIG, MEMFLAGS F>
1080 template <typename SCAN_FUNC>
1081 inline bool ConcurrentHashTable<CONFIG, F>::
1082   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1083 {
1084   if (!try_resize_lock(thread)) {
1085     return false;
1086   }
1087   do_scan_locked(thread, scan_f);
1088   unlock_resize_lock(thread);
1089   return true;
1090 }
1091 
1092 template <typename CONFIG, MEMFLAGS F>
1093 template <typename SCAN_FUNC>
1094 inline void ConcurrentHashTable<CONFIG, F>::
1095   do_scan(Thread* thread, SCAN_FUNC& scan_f)
1096 {
1097   assert(!SafepointSynchronize::is_at_safepoint(),
1098          "must be outside a safepoint");
1099   assert(_resize_lock_owner != thread, "Re-size lock held");
1100   lock_resize_lock(thread);
1101   do_scan_locked(thread, scan_f);
1102   unlock_resize_lock(thread);
1103   assert(_resize_lock_owner != thread, "Re-size lock held");
1104 }
1105 
1106 template <typename CONFIG, MEMFLAGS F>
1107 template <typename SCAN_FUNC>
1108 inline void ConcurrentHashTable<CONFIG, F>::
1109   do_safepoint_scan(SCAN_FUNC& scan_f)
1110 {
1111   // We only allow this method to be used during a safepoint.
1112   assert(SafepointSynchronize::is_at_safepoint(),
1113          "must only be called in a safepoint");
1114   assert(Thread::current()->is_VM_thread(),
1115          "should be in vm thread");
1116 
  // Here we skip protection: since we are at a safepoint, no other thread can
  // use this table at the same time.
1119   InternalTable* table = get_table();
1120   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1121     Bucket* bucket = table->get_bucket(bucket_it);
    // If the bucket has a redirect, the items will be in the new table. We
    // must visit them there, since the new table will contain any concurrent
    // inserts done after this bucket was resized. If the bucket does not have
    // the redirect flag, all items are in this table.
1126     if (!bucket->have_redirect()) {
      if (!visit_nodes(bucket, scan_f)) {
1128         return;
1129       }
1130     } else {
1131       assert(bucket->is_locked(), "Bucket must be locked.");
1132     }
1133   }
1134   // If there is a paused resize we also need to visit the already resized items.
1135   table = get_new_table();
1136   if (table == NULL) {
1137     return;
1138   }
1139   DEBUG_ONLY(if (table == POISON_PTR) { return; })
1140   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1141     Bucket* bucket = table->get_bucket(bucket_it);
1142     assert(!bucket->is_locked(), "Bucket must be unlocked.");
1143     if (!visit_nodes(bucket, scan_f)) {
1144       return;
1145     }
1146   }
1147 }
1148 
1149 template <typename CONFIG, MEMFLAGS F>
1150 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1151 inline bool ConcurrentHashTable<CONFIG, F>::
1152   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1153 {
1154   if (!try_resize_lock(thread)) {
1155     return false;
1156   }
1157   do_bulk_delete_locked(thread, eval_f, del_f);
1158   unlock_resize_lock(thread);
1159   assert(_resize_lock_owner != thread, "Re-size lock held");
1160   return true;
1161 }
1162 
1163 template <typename CONFIG, MEMFLAGS F>
1164 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1165 inline void ConcurrentHashTable<CONFIG, F>::
1166   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1167 {
1168   assert(!SafepointSynchronize::is_at_safepoint(),
1169          "must be outside a safepoint");
1170   lock_resize_lock(thread);
1171   do_bulk_delete_locked(thread, eval_f, del_f);
1172   unlock_resize_lock(thread);
1173 }
1174 
1175 template <typename CONFIG, MEMFLAGS F>
1176 template <typename VALUE_SIZE_FUNC>
1177 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
1178   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
1179 {
1180   NumberSeq summary;
1181   size_t literal_bytes = 0;
1182   InternalTable* table = get_table();
1183   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1184     ScopedCS cs(thread, this);
1185     size_t count = 0;
1186     Bucket* bucket = table->get_bucket(bucket_it);
1187     if (bucket->have_redirect() || bucket->is_locked()) {
1188       continue;
1189     }
1190     Node* current_node = bucket->first();
1191     while (current_node != NULL) {
1192       ++count;
1193       literal_bytes += vs_f(current_node->value());
1194       current_node = current_node->next();
1195     }
1196     summary.add((double)count);
1197   }
1198 
1199   return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
1200 }
1201 
1202 template <typename CONFIG, MEMFLAGS F>
1203 template <typename VALUE_SIZE_FUNC>
1204 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
1205   statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
1206 {
1207   if (!try_resize_lock(thread)) {
1208     return old;
1209   }
1210 
1211   TableStatistics ts = statistics_calculate(thread, vs_f);
1212   unlock_resize_lock(thread);
1213 
1214   return ts;
1215 }
1216 
1217 template <typename CONFIG, MEMFLAGS F>
1218 template <typename VALUE_SIZE_FUNC>
1219 inline void ConcurrentHashTable<CONFIG, F>::
1220   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1221                 outputStream* st, const char* table_name)
1222 {
1223   if (!try_resize_lock(thread)) {
1224     st->print_cr("statistics unavailable at this moment");
1225     return;
1226   }
1227 
1228   TableStatistics ts = statistics_calculate(thread, vs_f);
1229   unlock_resize_lock(thread);
1230 
1231   ts.print(st, table_name);
1232 }
1233 
1234 template <typename CONFIG, MEMFLAGS F>
1235 inline bool ConcurrentHashTable<CONFIG, F>::
1236   try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
1237 {
1238   if (!try_resize_lock(thread)) {
1239     return false;
1240   }
1241   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1242   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1243     Bucket* bucket = _table->get_bucket(bucket_it);
1244     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1245     while (bucket->first() != NULL) {
1246       Node* move_node = bucket->first();
1247       bool ok = bucket->cas_first(move_node->next(), move_node);
1248       assert(ok, "Uncontended cas must work");
1249       bool dead_hash = false;
1250       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1251       if (!dead_hash) {
1252         Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!insert_bucket->have_redirect() && !insert_bucket->is_locked(),
               "No state bits should be present");
1254         move_node->set_next(insert_bucket->first());
1255         ok = insert_bucket->cas_first(move_node, insert_bucket->first());
1256         assert(ok, "Uncontended cas must work");
1257       }
1258     }
1259   }
1260   unlock_resize_lock(thread);
1261   return true;
1262 }
1263 
1264 #endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP