1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
  26 #define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
  27 
  28 #include "memory/allocation.inline.hpp"
  29 #include "runtime/atomic.hpp"
  30 #include "runtime/orderAccess.hpp"
  31 #include "runtime/prefetch.inline.hpp"
  32 #include "utilities/concurrentHashTable.hpp"
  33 #include "utilities/globalCounter.inline.hpp"
  34 #include "utilities/numberSeq.hpp"
  35 #include "utilities/spinYield.hpp"
  36 
  37 // 2^30 = 1G buckets
  38 #define SIZE_BIG_LOG2 30
  39 // 2^5  = 32 buckets
  40 #define SIZE_SMALL_LOG2 5
  41 
  42 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
  43 #define SPINPAUSES_PER_YIELD 8192
  44 
  45 #ifdef ASSERT
  46 #ifdef _LP64
  47 // Two low bits are not usable.
  48 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
  49 #else
  50 // Two low bits are not usable.
  51 static const void* POISON_PTR = (void*)0xffbadbac;
  52 #endif
  53 #endif
  54 
  55 // Node
  56 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  57 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  58 ConcurrentHashTable<VALUE, CONFIG, F>::
  59   Node::next() const
  60 {
  61   return OrderAccess::load_acquire(&_next);
  62 }
  63 
  64 // Bucket
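     //
     // The two low bits of a bucket's _first pointer hold per-bucket state
     // (STATE_LOCK_BIT and STATE_REDIRECT_BIT, declared in concurrentHashTable.hpp),
     // which is why POISON_PTR above keeps its two low bits clear. first_raw()
     // returns the pointer with the state bits still set, first() strips them, and
     // set_state()/clear_state()/is_state() manipulate them.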
  65 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  66 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  67 ConcurrentHashTable<VALUE, CONFIG, F>::
  68   Bucket::first_raw() const
  69 {
  70   return OrderAccess::load_acquire(&_first);
  71 }
  72 
  73 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  74 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  75   Bucket::release_assign_node_ptr(
  76     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
  77     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
  78 {
  79   // Due to this assert, this method is not static.
  80   assert(is_locked(), "Must be locked.");
  81   Node** tmp = (Node**)dst;
  82   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
  83 }
  84 
  85 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  86 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
  87 ConcurrentHashTable<VALUE, CONFIG, F>::
  88   Bucket::first() const
  89 {
  90   // We strip the state bits before returning the pointer.
  91   return clear_state(OrderAccess::load_acquire(&_first));
  92 }
  93 
  94 template <typename VALUE, typename CONFIG, MEMFLAGS F>
  95 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  96   Bucket::have_redirect() const
  97 {
  98   return is_state(first_raw(), STATE_REDIRECT_BIT);
  99 }
 100 
 101 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 102 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 103   Bucket::is_locked() const
 104 {
 105   return is_state(first_raw(), STATE_LOCK_BIT);
 106 }
 107 
 108 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 109 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 110   Bucket::lock()
 111 {
 112   int i = 0;
 113   // SpinYield would be unfair here
 114   while (!this->trylock()) {
 115     if ((++i) == SPINPAUSES_PER_YIELD) {
 116       // On a contemporary OS, yielding will give the CPU to another runnable
 117       // thread if no CPU is otherwise available.
 118       os::naked_yield();
 119       i = 0;
 120     } else {
 121       SpinPause();
 122     }
 123   }
 124 }
 125 
 126 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 127 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 128   Bucket::release_assign_last_node_next(
 129      typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
 130 {
 131   assert(is_locked(), "Must be locked.");
 132   Node* const volatile * ret = first_ptr();
 133   while (clear_state(*ret) != NULL) {
 134     ret = clear_state(*ret)->next_ptr();
 135   }
 136   release_assign_node_ptr(ret, node);
 137 }
 138 
 139 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 140 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 141   Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
 142                     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
 143                     )
 144 {
 145   if (is_locked()) {
 146     return false;
 147   }
 148   if (Atomic::cmpxchg(node, &_first, expect) == expect) {
 149     return true;
 150   }
 151   return false;
 152 }
 153 
 154 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 155 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 156   Bucket::trylock()
 157 {
 158   if (is_locked()) {
 159     return false;
 160   }
 161   // We will expect a clean first pointer.
 162   Node* tmp = first();
 163   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
 164     return true;
 165   }
 166   return false;
 167 }
 168 
 169 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 170 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 171   Bucket::unlock()
 172 {
 173   assert(is_locked(), "Must be locked.");
 174   assert(!have_redirect(),
 175          "Unlocking a bucket after it has reached terminal state.");
 176   OrderAccess::release_store(&_first, clear_state(first()));
 177 }
 178 
 179 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 180 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 181   Bucket::redirect()
 182 {
 183   assert(is_locked(), "Must be locked.");
 184   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 185 }
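
     // Note on redirect(): once the redirect bit is set, the bucket has reached its
     // terminal state for this table; readers that see it are sent to the
     // corresponding bucket in the new table (see get_bucket() below), and the
     // bucket is never unlocked again (see the assert in unlock() above).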
 186 
 187 // InternalTable
 188 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 189 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 190   InternalTable::InternalTable(size_t log2_size)
 191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
 192       _hash_mask(~(~((size_t)0) << _log2_size))
 193 {
 194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
 195          "Bad size");
 196   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
 197   // Use placement new for each element instead of new[], which could require
 198   // more memory than was allocated.
 199   for (size_t i = 0; i < _size; ++i) {
 200     new (_buckets + i) Bucket();
 201   }
 202 }
 203 
 204 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 205 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 206   InternalTable::~InternalTable()
 207 {
 208   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 209 }
 210 
 211 // ScopedCS
 212 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 213 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
 215     : _thread(thread),
 216       _cht(cht),
 217       _cs_context(GlobalCounter::critical_section_begin(_thread))
 218 {
 219   // This version is now published; clear the not-published flag if it is set.
 220   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
 221     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
 222   }
 223 }
 224 
 225 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 226 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 227   ScopedCS::~ScopedCS()
 228 {
 229   GlobalCounter::critical_section_end(_thread, _cs_context);
 230 }
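
     // ScopedCS is the read-side of the GlobalCounter-based synchronization scheme
     // (globalCounter.inline.hpp): readers bracket their accesses with
     // critical_section_begin()/critical_section_end(), and writers that unlink
     // nodes or swap tables call GlobalCounter::write_synchronize() to wait until
     // no reader can still observe the old pointers before freeing them.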
 231 
 232 // BaseConfig
 233 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 234 inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
 235   BaseConfig::allocate_node(size_t size, const VALUE& value)
 236 {
 237   return AllocateHeap(size, F);
 238 }
 239 
 240 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 241 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 242   BaseConfig::free_node(void* memory, const VALUE& value)
 243 {
 244   FreeHeap(memory);
 245 }
 246 
 247 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 248 template <typename LOOKUP_FUNC>
 249 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
 250   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 251 {
 252   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 253 }
 254 
 255 // HaveDeletables
 256 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 257 template <typename EVALUATE_FUNC>
 258 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 259   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 260                                                       EVALUATE_FUNC& eval_f,
 261                                                       Bucket* prefetch_bucket)
 262 {
 263   // Instantiated for pointer types (true), so we can use prefetch.
 264   // When visiting all nodes, doing this prefetch gives around a 30% speedup.
 265   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
 266   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 267     if (pref != NULL) {
 268       Prefetch::read(*pref->value(), 0);
 269       pref = pref->next();
 270     }
 271     // Read next() Node* once.  May be racing with a thread moving the next
 272     // pointers.
 273     Node* next_pref = next->next();
 274     if (next_pref != NULL) {
 275       Prefetch::read(*next_pref->value(), 0);
 276     }
 277     if (eval_f(next->value())) {
 278       return true;
 279     }
 280   }
 281   return false;
 282 }
 283 
 284 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 285 template <bool b, typename EVALUATE_FUNC>
 286 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 287   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 288                                                    EVALUATE_FUNC& eval_f,
 289                                                    Bucket* preb)
 290 {
 291   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 292     if (eval_f(next->value())) {
 293       return true;
 294     }
 295   }
 296   return false;
 297 }
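
     // Which have_deletable() version is used is decided by IsPointer<VALUE>::value
     // in do_bulk_delete_locked_for() below: the specialization above prefetches
     // through the stored pointers, while the generic version here simply walks the
     // chain.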
 298 
 299 // ConcurrentHashTable
 300 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 301 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 302   write_synchonize_on_visible_epoch(Thread* thread)
 303 {
 304   assert(_resize_lock_owner == thread, "Re-size lock not held");
 305   OrderAccess::fence(); // Prevent below load from floating up.
 306   // If no reader saw this version we can skip write_synchronize.
 307   if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
 308     return;
 309   }
 310   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
 311   // We mark this/the next version that we are synchronizing on as not published.
 312   // A reader will clear this flag if it reads this/the next version.
 313   OrderAccess::release_store(&_invisible_epoch, thread);
 314   GlobalCounter::write_synchronize();
 315 }
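
     // For example, unzip_bucket() below calls write_synchonize_on_visible_epoch()
     // once per moved node, but only the first call, and any call made after a
     // reader has entered a ScopedCS (which clears _invisible_epoch), pays for a
     // full GlobalCounter::write_synchronize(); the rest return early.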
 316 
 317 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 318 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 319   try_resize_lock(Thread* locker)
 320 {
 321   if (_resize_lock->try_lock()) {
 322     if (_resize_lock_owner != NULL) {
 323       assert(locker != _resize_lock_owner, "Already own lock");
 324       // We got the mutex, but the internal state is locked.
 325       _resize_lock->unlock();
 326       return false;
 327     }
 328   } else {
 329     return false;
 330   }
 331   _invisible_epoch = 0;
 332   _resize_lock_owner = locker;
 333   return true;
 334 }
 335 
 336 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 337 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 338   lock_resize_lock(Thread* locker)
 339 {
 340   size_t i = 0;
 341   // If the lock is held by some other thread, the chances that it is released
 342   // quickly are low, so we prefer yielding.
 343   SpinYield yield(1, 512);
 344   do {
 345     _resize_lock->lock_without_safepoint_check();
 346     // If the holder of the lock dropped the mutex for a safepoint, the mutex
 347     // might be unlocked while _resize_lock_owner still contains the owner.
 348     if (_resize_lock_owner != NULL) {
 349       assert(locker != _resize_lock_owner, "Already own lock");
 350       // We got the mutex, but the internal state is locked.
 351       _resize_lock->unlock();
 352       yield.wait();
 353     } else {
 354       break;
 355     }
 356   } while(true);
 357   _resize_lock_owner = locker;
 358   _invisible_epoch = 0;
 359 }
 360 
 361 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 362 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 363   unlock_resize_lock(Thread* locker)
 364 {
 365   _invisible_epoch = 0;
 366   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
 367   _resize_lock_owner = NULL;
 368   _resize_lock->unlock();
 369 }
 370 
 371 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 372 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 373   free_nodes()
 374 {
 375   // We assume there is no concurrent access during freeing.
 376   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
 377     Bucket* bucket = _table->get_buckets() + node_it;
 378     Node* node = bucket->first();
 379     while (node != NULL) {
 380       Node* free_node = node;
 381       node = node->next();
 382       Node::destroy_node(free_node);
 383     }
 384   }
 385 }
 386 
 387 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 388 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 389 ConcurrentHashTable<VALUE, CONFIG, F>::
 390   get_table() const
 391 {
 392   return OrderAccess::load_acquire(&_table);
 393 }
 394 
 395 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 396 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 397 ConcurrentHashTable<VALUE, CONFIG, F>::
 398   get_new_table() const
 399 {
 400   return OrderAccess::load_acquire(&_new_table);
 401 }
 402 
 403 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 404 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
 405 ConcurrentHashTable<VALUE, CONFIG, F>::
 406   set_table_from_new()
 407 {
 408   InternalTable* old_table = _table;
 409   // Publish the new table.
 410   OrderAccess::release_store(&_table, _new_table);
 411   // All must see this.
 412   GlobalCounter::write_synchronize();
 413   // _new_table is not read any more.
 414   _new_table = NULL;
 415   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
 416   return old_table;
 417 }
 418 
 419 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 420 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 421   internal_grow_range(Thread* thread, size_t start, size_t stop)
 422 {
 423   assert(stop <= _table->_size, "Outside backing array");
 424   assert(_new_table != NULL, "Grow not proper setup before start");
 425   // The state is also copied here. Hence all buckets in the new table will be
 426   // locked. We call the sibling buckets odd/even, where the even bucket has
 427   // high bit 0 and the odd bucket has high bit 1.
 428   for (size_t even_index = start; even_index < stop; even_index++) {
 429     Bucket* bucket = _table->get_bucket(even_index);
 430 
 431     bucket->lock();
 432 
 433     size_t odd_index = even_index + _table->_size;
 434     _new_table->get_buckets()[even_index] = *bucket;
 435     _new_table->get_buckets()[odd_index] = *bucket;
 436 
 437     // Lockers are moved to the new table, where they will wait until the unlock() below.
 438     bucket->redirect(); /* Must release stores above */
 439 
 440     // When this is done we have separated the nodes into corresponding buckets
 441     // in new table.
 442     if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
 443       // If the bucket is empty, unzip does nothing.
 444       // We must make sure readers go to the new table before we poison the bucket.
 445       DEBUG_ONLY(GlobalCounter::write_synchronize();)
 446     }
 447 
 448     // Unlock for writes into the new table buckets.
 449     _new_table->get_bucket(even_index)->unlock();
 450     _new_table->get_bucket(odd_index)->unlock();
 451 
 452     DEBUG_ONLY(
 453        bucket->release_assign_node_ptr(
 454           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
 455     )
 456   }
 457 }
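
     // A small worked example of the even/odd split above (assuming that
     // bucket_idx_hash(), declared in concurrentHashTable.hpp, selects a bucket by
     // masking the hash with the table's _hash_mask, as set up in the InternalTable
     // constructor):
     //
     //   old table: log2_size == 5, so old index = hash & 0x1f
     //   new table: log2_size == 6, so new index = hash & 0x3f
     //
     // A node in old bucket i therefore ends up in new bucket i ("even", bit 5 of
     // the hash is 0) or in new bucket i + 32 ("odd", bit 5 of the hash is 1),
     // which matches odd_index = even_index + _table->_size above.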
 458 
 459 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 460 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
 461 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 462   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
 463 {
 464   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 465   assert(bucket->is_locked(), "Must be locked.");
 466   Node* const volatile * rem_n_prev = bucket->first_ptr();
 467   Node* rem_n = bucket->first();
 468   bool have_dead = false;
 469   while (rem_n != NULL) {
 470     if (lookup_f.equals(rem_n->value(), &have_dead)) {
 471       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 472       break;
 473     } else {
 474       rem_n_prev = rem_n->next_ptr();
 475       rem_n = rem_n->next();
 476     }
 477   }
 478 
 479   bucket->unlock();
 480 
 481   if (rem_n == NULL) {
 482     return false;
 483   }
 484   // Publish the deletion.
 485   GlobalCounter::write_synchronize();
 486   delete_f(rem_n->value());
 487   Node::destroy_node(rem_n);
 488   return true;
 489 }
 490 
 491 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 492 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 493 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 494   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
 495                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 496 {
 497   // Here we hold the resize lock, so the table is SMR safe and there is no new
 498   // table. This can be done in parallel if we want.
 499   assert((is_mt && _resize_lock_owner != NULL) ||
 500          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
 501   Node* ndel[BULK_DELETE_LIMIT];
 502   InternalTable* table = get_table();
 503   assert(start_idx < stop_idx, "Must be");
 504   assert(stop_idx <= _table->_size, "Must be");
 505   // Here we handle the critical section manually, since we don't want to take
 506   // the cost of locking the bucket if there is nothing to delete. But we can
 507   // have concurrent single deletes. The _invisible_epoch can only be used by
 508   // the owner of _resize_lock, which is us here. Therefore we must not change
 509   // it on our own read-side.
 510   GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread);
 511   for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
 512     Bucket* bucket = table->get_bucket(bucket_it);
 513     Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
 514                               table->get_bucket(bucket_it+1) : NULL;
 515 
 516     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
 517         have_deletable(bucket, eval_f, prefetch_bucket)) {
 518         // Nothing to remove in this bucket.
 519         continue;
 520     }
 521 
 522     GlobalCounter::critical_section_end(thread, cs_context);
 523     // We left critical section but the bucket cannot be removed while we hold
 524     // the _resize_lock.
 525     bucket->lock();
 526     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
 527     bucket->unlock();
 528     if (is_mt) {
 529       GlobalCounter::write_synchronize();
 530     } else {
 531       write_synchonize_on_visible_epoch(thread);
 532     }
 533     for (size_t node_it = 0; node_it < nd; node_it++) {
 534       del_f(ndel[node_it]->value());
 535       Node::destroy_node(ndel[node_it]);
 536       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 537     }
 538     cs_context = GlobalCounter::critical_section_begin(thread);
 539   }
 540   GlobalCounter::critical_section_end(thread, cs_context);
 541 }
 542 
 543 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 544 template <typename LOOKUP_FUNC>
 545 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 546   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 547 {
 548   assert(bucket->is_locked(), "Must be locked.");
 549 
 550   size_t dels = 0;
 551   Node* ndel[BULK_DELETE_LIMIT];
 552   Node* const volatile * rem_n_prev = bucket->first_ptr();
 553   Node* rem_n = bucket->first();
 554   while (rem_n != NULL) {
 555     bool is_dead = false;
 556     lookup_f.equals(rem_n->value(), &is_dead);
 557     if (is_dead) {
 558       ndel[dels++] = rem_n;
 559       Node* next_node = rem_n->next();
 560       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 561       rem_n = next_node;
 562       if (dels == BULK_DELETE_LIMIT) {
 563         break;
 564       }
 565     } else {
 566       rem_n_prev = rem_n->next_ptr();
 567       rem_n = rem_n->next();
 568     }
 569   }
 570   if (dels > 0) {
 571     GlobalCounter::write_synchronize();
 572     for (size_t node_it = 0; node_it < dels; node_it++) {
 573       Node::destroy_node(ndel[node_it]);
 574       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 575     }
 576   }
 577 }
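
     // Note the deletion pattern used above and in delete_check_nodes() below:
     // nodes are first unlinked while the bucket is locked, then a single
     // write_synchronize (or write_synchonize_on_visible_epoch) call makes sure no
     // reader still holds a reference, and only then are the nodes destroyed.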
 578 
 579 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 580 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 581 ConcurrentHashTable<VALUE, CONFIG, F>::
 582   get_bucket(uintx hash) const
 583 {
 584   InternalTable* table = get_table();
 585   Bucket* bucket = get_bucket_in(table, hash);
 586   if (bucket->have_redirect()) {
 587     table = get_new_table();
 588     bucket = get_bucket_in(table, hash);
 589   }
 590   return bucket;
 591 }
 592 
 593 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 594 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 595 ConcurrentHashTable<VALUE, CONFIG, F>::
 596   get_bucket_locked(Thread* thread, const uintx hash)
 597 {
 598   Bucket* bucket;
 599   int i = 0;
 600   // SpinYield would be unfair here
 601   while(true) {
 602     {
 603       // We need a critical section to protect the table itself. But if we fail to
 604       // lock the bucket, we must leave the critical section or we would deadlock.
 605       ScopedCS cs(thread, this);
 606       bucket = get_bucket(hash);
 607       if (bucket->trylock()) {
 608         break; /* ends critical section */
 609       }
 610     } /* ends critical section */
 611     if ((++i) == SPINPAUSES_PER_YIELD) {
 612       // On a contemporary OS, yielding will give the CPU to another runnable
 613       // thread if no CPU is otherwise available.
 614       os::naked_yield();
 615       i = 0;
 616     } else {
 617       SpinPause();
 618     }
 619   }
 620   return bucket;
 621 }
 622 
 623 // Always called within critical section
 624 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 625 template <typename LOOKUP_FUNC>
 626 typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
 627 ConcurrentHashTable<VALUE, CONFIG, F>::
 628   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
 629            bool* have_dead, size_t* loops) const
 630 {
 631   size_t loop_count = 0;
 632   Node* node = bucket->first();
 633   while (node != NULL) {
 634     bool is_dead = false;
 635     ++loop_count;
 636     if (lookup_f.equals(node->value(), &is_dead)) {
 637       break;
 638     }
 639     if (is_dead && !(*have_dead)) {
 640       *have_dead = true;
 641     }
 642     node = node->next();
 643   }
 644   if (loops != NULL) {
 645     *loops = loop_count;
 646   }
 647   return node;
 648 }
 649 
 650 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 651 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 652   unzip_bucket(Thread* thread, InternalTable* old_table,
 653                InternalTable* new_table, size_t even_index, size_t odd_index)
 654 {
 655   Node* aux = old_table->get_bucket(even_index)->first();
 656   if (aux == NULL) {
 657     // This is an empty bucket, and in debug builds we poison the first pointer
 658     // in the bucket. Therefore we must make sure no readers are looking at this
 659     // bucket. If we don't do a write_synchronize here, the caller must do it.
 660     return false;
 661   }
 662   Node* delete_me = NULL;
 663   Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
 664   Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
 665   while (aux != NULL) {
 666     bool dead_hash = false;
 667     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
 668     Node* aux_next = aux->next();
 669     if (dead_hash) {
 670       delete_me = aux;
 671       // This item is dead; move both lists to the next node.
 672       new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 673                                                                 aux_next);
 674       new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 675                                                                  aux_next);
 676     } else {
 677       size_t aux_index = bucket_idx_hash(new_table, aux_hash);
 678       if (aux_index == even_index) {
 679         // This node hashes to the even bucket, so advance the odd pointer past it.
 680         new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 681                                                                   aux_next);
 682         // Keep in even list
 683         even = aux->next_ptr();
 684       } else if (aux_index == odd_index) {
 685         // This node hashes to the odd bucket, so advance the even pointer past it.
 686         new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 687                                                                    aux_next);
 688         // Keep in odd list
 689         odd = aux->next_ptr();
 690       } else {
 691         fatal("aux_index does not match even or odd indices");
 692       }
 693     }
 694     aux = aux_next;
 695 
 696     // We can only move one pointer per iteration, otherwise a reader might be
 697     // moved to the wrong chain, e.g. a reader looking for an even hash value
 698     // could get moved to the odd bucket chain.
 699     write_synchonize_on_visible_epoch(thread);
 700     if (delete_me != NULL) {
 701       Node::destroy_node(delete_me);
 702       delete_me = NULL;
 703     }
 704   }
 705   return true;
 706 }
 707 
 708 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 709 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 710   internal_shrink_prolog(Thread* thread, size_t log2_size)
 711 {
 712   if (!try_resize_lock(thread)) {
 713     return false;
 714   }
 715   assert(_resize_lock_owner == thread, "Re-size lock not held");
 716   if (_table->_log2_size == _log2_start_size ||
 717       _table->_log2_size <= log2_size) {
 718     unlock_resize_lock(thread);
 719     return false;
 720   }
 721   _new_table = new InternalTable(_table->_log2_size - 1);
 722   return true;
 723 }
 724 
 725 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 726 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 727   internal_shrink_epilog(Thread* thread)
 728 {
 729   assert(_resize_lock_owner == thread, "Re-size lock not held");
 730 
 731   InternalTable* old_table = set_table_from_new();
 732   _size_limit_reached = false;
 733   unlock_resize_lock(thread);
 734 #ifdef ASSERT
 735   for (size_t i = 0; i < old_table->_size; i++) {
 736     assert(old_table->get_bucket(i)->first() == POISON_PTR,
 737            "No poison found");
 738   }
 739 #endif
 740   // ABA safe, old_table not visible to any other threads.
 741   delete old_table;
 742 }
 743 
 744 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 745 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 746   internal_shrink_range(Thread* thread, size_t start, size_t stop)
 747 {
 748   // The state is also copied here.
 749   // Hence all buckets in the new table will be locked.
 750   for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
 751     size_t even_hash_index = bucket_it; // High bit 0
 752     size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1
 753 
 754     Bucket* b_old_even = _table->get_bucket(even_hash_index);
 755     Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);
 756 
 757     b_old_even->lock();
 758     b_old_odd->lock();
 759 
 760     _new_table->get_buckets()[bucket_it] = *b_old_even;
 761 
 762     // Put chains together.
 763     _new_table->get_bucket(bucket_it)->
 764       release_assign_last_node_next(*(b_old_odd->first_ptr()));
 765 
 766     b_old_even->redirect();
 767     b_old_odd->redirect();
 768 
 769     write_synchonize_on_visible_epoch(thread);
 770 
 771     // Unlock for writes into new smaller table.
 772     _new_table->get_bucket(bucket_it)->unlock();
 773 
 774     DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
 775                                                    (Node*)POISON_PTR);)
 776     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
 777                                                   (Node*)POISON_PTR);)
 778   }
 779 }
 780 
 781 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 782 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 783   internal_shrink(Thread* thread, size_t log2_size)
 784 {
 785   if (!internal_shrink_prolog(thread, log2_size)) {
 786     assert(_resize_lock_owner != thread, "Re-size lock held");
 787     return false;
 788   }
 789   assert(_resize_lock_owner == thread, "Should be locked by me");
 790   internal_shrink_range(thread, 0, _new_table->_size);
 791   internal_shrink_epilog(thread);
 792   assert(_resize_lock_owner != thread, "Re-size lock held");
 793   return true;
 794 }
 795 
 796 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 797 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 798   internal_grow_prolog(Thread* thread, size_t log2_size)
 799 {
 800   // We only do this double checking of _size_limit_reached/is_max_size_reached()
 801   // in the grow path, since grow means a high load on the table while shrink
 802   // means a low load.
 803   if (is_max_size_reached()) {
 804     return false;
 805   }
 806   if (!try_resize_lock(thread)) {
 807     // Either we have an ongoing resize or an operation which doesn't want us
 808     // to resize now.
 809     return false;
 810   }
 811   if (is_max_size_reached() || _table->_log2_size >= log2_size) {
 812     unlock_resize_lock(thread);
 813     return false;
 814   }
 815 
 816   _new_table = new InternalTable(_table->_log2_size + 1);
 817 
 818   if (_new_table->_log2_size == _log2_size_limit) {
 819     _size_limit_reached = true;
 820   }
 821 
 822   return true;
 823 }
 824 
 825 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 826 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 827   internal_grow_epilog(Thread* thread)
 828 {
 829   assert(_resize_lock_owner == thread, "Should be locked");
 830 
 831   InternalTable* old_table = set_table_from_new();
 832   unlock_resize_lock(thread);
 833 #ifdef ASSERT
 834   for (size_t i = 0; i < old_table->_size; i++) {
 835     assert(old_table->get_bucket(i)->first() == POISON_PTR,
 836            "No poison found");
 837   }
 838 #endif
 839   // ABA safe, old_table not visible to any other threads.
 840   delete old_table;
 841 }
 842 
 843 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 844 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 845   internal_grow(Thread* thread, size_t log2_size)
 846 {
 847   if (!internal_grow_prolog(thread, log2_size)) {
 848     assert(_resize_lock_owner != thread, "Re-size lock held");
 849     return false;
 850   }
 851   assert(_resize_lock_owner == thread, "Should be locked by me");
 852   internal_grow_range(thread, 0, _table->_size);
 853   internal_grow_epilog(thread);
 854   assert(_resize_lock_owner != thread, "Re-size lock held");
 855   return true;
 856 }
 857 
 858 // Always called within critical section
 859 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 860 template <typename LOOKUP_FUNC>
 861 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
 862   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
 863 {
 864   bool clean = false;
 865   size_t loops = 0;
 866   VALUE* ret = NULL;
 867 
 868   const Bucket* bucket = get_bucket(lookup_f.get_hash());
 869   Node* node = get_node(bucket, lookup_f, &clean, &loops);
 870   if (node != NULL) {
 871     ret = node->value();
 872   }
 873   if (grow_hint != NULL) {
 874     *grow_hint = loops > _grow_hint;
 875   }
 876 
 877   return ret;
 878 }
 879 
 880 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 881 template <typename LOOKUP_FUNC>
 882 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 883   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
 884                   bool* grow_hint, bool* clean_hint)
 885 {
 886   bool ret = false;
 887   bool clean = false;
 888   bool locked;
 889   size_t loops = 0;
 890   size_t i = 0;
 891   uintx hash = lookup_f.get_hash();
 892   Node* new_node = Node::create_node(value, NULL);
 893 
 894   while (true) {
 895     {
 896       ScopedCS cs(thread, this); /* protects the table/bucket */
 897       Bucket* bucket = get_bucket(hash);
 898       Node* first_at_start = bucket->first();
 899       Node* old = get_node(bucket, lookup_f, &clean, &loops);
 900       if (old == NULL) {
 901         new_node->set_next(first_at_start);
 902         if (bucket->cas_first(new_node, first_at_start)) {
 903           new_node = NULL;
 904           ret = true;
 905           break; /* leave critical section */
 906         }
 907         // The CAS failed; we must leave the critical section and retry.
 908         locked = bucket->is_locked();
 909       } else {
 910         // There is a duplicate.
 911         break; /* leave critical section */
 912       }
 913     } /* leave critical section */
 914     i++;
 915     if (locked) {
 916       os::naked_yield();
 917     } else {
 918       SpinPause();
 919     }
 920   }
 921 
 922   if (new_node != NULL) {
 923     // A duplicate exists (possibly inserted after our CAS failed); free this node.
 924     Node::destroy_node(new_node);
 925   } else if (i == 0 && clean) {
 926     // We only do cleaning on fast inserts.
 927     Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
 928     delete_in_bucket(thread, bucket, lookup_f);
 929     bucket->unlock();
 930     clean = false;
 931   }
 932 
 933   if (grow_hint != NULL) {
 934     *grow_hint = loops > _grow_hint;
 935   }
 936 
 937   if (clean_hint != NULL) {
 938     *clean_hint = clean;
 939   }
 940 
 941   return ret;
 942 }
 943 
 944 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 945 template <typename FUNC>
 946 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 947   visit_nodes(Bucket* bucket, FUNC& visitor_f)
 948 {
 949   Node* current_node = bucket->first();
 950   while (current_node != NULL) {
 951     if (!visitor_f(current_node->value())) {
 952       return false;
 953     }
 954     current_node = current_node->next();
 955   }
 956   return true;
 957 }
 958 
 959 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 960 template <typename FUNC>
 961 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 962   do_scan_locked(Thread* thread, FUNC& scan_f)
 963 {
 964   assert(_resize_lock_owner == thread, "Re-size lock not held");
 965   // We could do a critical section over the entire loop, but that would block
 966   // updates for a long time. Instead we choose to block resizes.
 967   InternalTable* table = get_table();
 968   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
 969     ScopedCS cs(thread, this);
 970     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
 971       break; /* ends critical section */
 972     }
 973   } /* ends critical section */
 974 }
 975 
 976 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 977 template <typename EVALUATE_FUNC>
 978 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
 979   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
 980                      size_t num_del, Node** ndel)
 981 {
 982   size_t dels = 0;
 983   Node* const volatile * rem_n_prev = bucket->first_ptr();
 984   Node* rem_n = bucket->first();
 985   while (rem_n != NULL) {
 986     if (eval_f(rem_n->value())) {
 987       ndel[dels++] = rem_n;
 988       Node* next_node = rem_n->next();
 989       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 990       rem_n = next_node;
 991       if (dels == num_del) {
 992         break;
 993       }
 994     } else {
 995       rem_n_prev = rem_n->next_ptr();
 996       rem_n = rem_n->next();
 997     }
 998   }
 999   return dels;
1000 }
1001 
1002 // Constructor
1003 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1004 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1005   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
1006     : _new_table(NULL), _log2_size_limit(log2size_limit),
1007        _log2_start_size(log2size), _grow_hint(grow_hint),
1008        _size_limit_reached(false), _resize_lock_owner(NULL),
1009        _invisible_epoch(0)
1010 {
1011   _resize_lock =
1012     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
1013               Monitor::_safepoint_check_never);
1014   _table = new InternalTable(log2size);
1015   assert(log2size_limit >= log2size, "bad ergo");
1016   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1017 }
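
     // For illustration only, a minimal instantiation could look like the sketch
     // below. The names IntConfig/IntTable and the use of mtInternal are examples,
     // not part of this file; the requirements visible here are that CONFIG derives
     // from BaseConfig and provides a static get_hash(const VALUE&, bool* is_dead).
     //
     //   struct IntConfig;
     //   typedef ConcurrentHashTable<int, IntConfig, mtInternal> IntTable;
     //
     //   struct IntConfig : public IntTable::BaseConfig {
     //     static uintx get_hash(const int& value, bool* is_dead) {
     //       *is_dead = false;                  // nothing is ever "dead" here
     //       return ((uintx)value) * 0x9e3779b9;
     //     }
     //   };
     //
     //   // 2^5 = 32 start buckets, may grow to 2^16 buckets, grow hint of 4.
     //   IntTable* table = new IntTable(SIZE_SMALL_LOG2, 16, 4);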
1018 
1019 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1020 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1021   ~ConcurrentHashTable()
1022 {
1023   delete _resize_lock;
1024   free_nodes();
1025   delete _table;
1026 }
1027 
1028 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1029 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
1030   get_size_log2(Thread* thread)
1031 {
1032   ScopedCS cs(thread, this);
1033   return _table->_log2_size;
1034 }
1035 
1036 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1037 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1038   shrink(Thread* thread, size_t size_limit_log2)
1039 {
1040   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
1041   bool ret = internal_shrink(thread, tmp);
1042   return ret;
1043 }
1044 
1045 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1046 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1047   grow(Thread* thread, size_t size_limit_log2)
1048 {
1049   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
1050   return internal_grow(thread, tmp);
1051 }
1052 
1053 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1054 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
1055 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1056   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
1057 {
1058   bool ret = false;
1059   ScopedCS cs(thread, this);
1060   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1061   if (val != NULL) {
1062     found_f(val);
1063     ret = true;
1064   }
1065   return ret;
1066 }
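
     // A LOOKUP_FUNC needs get_hash() and equals(), and a FOUND_FUNC is simply
     // applied to the VALUE* when a match is found. A sketch matching the IntTable
     // example above:
     //
     //   struct IntLookup {
     //     int _key;
     //     explicit IntLookup(int key) : _key(key) {}
     //     uintx get_hash() const {
     //       bool dead = false;
     //       return IntConfig::get_hash(_key, &dead);
     //     }
     //     bool equals(const int* value, bool* is_dead) {
     //       *is_dead = false;
     //       return *value == _key;
     //     }
     //   };
     //   struct FoundIt {
     //     void operator()(int* value) { /* use *value */ }
     //   };
     //
     //   IntLookup lookup(17);
     //   FoundIt found_f;
     //   bool present = table->get(Thread::current(), lookup, found_f, NULL);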
1067 
1068 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1069 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1070   unsafe_insert(const VALUE& value) {
1071   bool dead_hash = false;
1072   size_t hash = CONFIG::get_hash(value, &dead_hash);
1073   if (dead_hash) {
1074     return false;
1075   }
1076   // This is an unsafe operation.
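       // It is only safe when no other thread can access the table concurrently,
       // for example while the table is being populated by a single thread.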
1077   InternalTable* table = get_table();
1078   Bucket* bucket = get_bucket_in(table, hash);
1079   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1080   Node* new_node = Node::create_node(value, bucket->first());
1081   if (!bucket->cas_first(new_node, bucket->first())) {
1082     assert(false, "bad");
1083   }
1084   return true;
1085 }
1086 
1087 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1088 template <typename SCAN_FUNC>
1089 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1090   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1091 {
1092   if (!try_resize_lock(thread)) {
1093     return false;
1094   }
1095   do_scan_locked(thread, scan_f);
1096   unlock_resize_lock(thread);
1097   return true;
1098 }
1099 
1100 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1101 template <typename SCAN_FUNC>
1102 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1103   do_scan(Thread* thread, SCAN_FUNC& scan_f)
1104 {
1105   assert(!SafepointSynchronize::is_at_safepoint(),
1106          "must be outside a safepoint");
1107   assert(_resize_lock_owner != thread, "Re-size lock held");
1108   lock_resize_lock(thread);
1109   do_scan_locked(thread, scan_f);
1110   unlock_resize_lock(thread);
1111   assert(_resize_lock_owner != thread, "Re-size lock held");
1112 }
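
     // A SCAN_FUNC is applied to each VALUE* and returns false to stop the scan
     // early (see visit_nodes() above). A sketch matching the IntTable example:
     //
     //   struct PrintAll {
     //     bool operator()(int* value) {
     //       tty->print_cr("value: %d", *value);
     //       return true;                       // keep scanning
     //     }
     //   };
     //
     //   PrintAll printer;
     //   table->try_scan(Thread::current(), printer);  // false if resize lock is busy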
1113 
1114 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1115 template <typename SCAN_FUNC>
1116 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1117   do_safepoint_scan(SCAN_FUNC& scan_f)
1118 {
1119   // We only allow this method to be used during a safepoint.
1120   assert(SafepointSynchronize::is_at_safepoint(),
1121          "must only be called in a safepoint");
1122   assert(Thread::current()->is_VM_thread(),
1123          "should be in vm thread");
1124 
1125   // Here we skip the read-side protection; no other thread can be using this
1126   // table at the same time, since we are at a safepoint.
1127   InternalTable* table = get_table();
1128   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1129     Bucket* bucket = table->get_bucket(bucket_it);
1130     // If the bucket has a redirect, the items will be in the new table.
1131     // We must visit them there, since the new table will contain any
1132     // concurrent inserts done after this bucket was resized.
1133     // If the bucket does not have the redirect flag, all items are in this table.
1134     if (!bucket->have_redirect()) {
1135       if (!visit_nodes(bucket, scan_f)) {
1136         return;
1137       }
1138     } else {
1139       assert(bucket->is_locked(), "Bucket must be locked.");
1140     }
1141   }
1142   // If there is a paused resize we also need to visit the already resized items.
1143   table = get_new_table();
1144   if (table == NULL) {
1145     return;
1146   }
1147   DEBUG_ONLY(if (table == POISON_PTR) { return; })
1148   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1149     Bucket* bucket = table->get_bucket(bucket_it);
1150     assert(!bucket->is_locked(), "Bucket must be unlocked.");
1151     if (!visit_nodes(bucket, scan_f)) {
1152       return;
1153     }
1154   }
1155 }
1156 
1157 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1158 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1159 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1160   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1161 {
1162   if (!try_resize_lock(thread)) {
1163     return false;
1164   }
1165   do_bulk_delete_locked(thread, eval_f, del_f);
1166   unlock_resize_lock(thread);
1167   assert(_resize_lock_owner != thread, "Re-size lock held");
1168   return true;
1169 }
1170 
1171 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1172 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1173 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1174   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1175 {
1176   assert(!SafepointSynchronize::is_at_safepoint(),
1177          "must be outside a safepoint");
1178   lock_resize_lock(thread);
1179   do_bulk_delete_locked(thread, eval_f, del_f);
1180   unlock_resize_lock(thread);
1181 }
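
     // An EVALUATE_FUNC returns true for values that should be removed, and a
     // DELETE_FUNC is applied to each removed value before its node is destroyed
     // (see do_bulk_delete_locked_for() above). A sketch matching the IntTable
     // example:
     //
     //   struct IsOdd {
     //     bool operator()(int* value) { return (*value & 1) != 0; }
     //   };
     //   struct NoOp {
     //     void operator()(int* value) {}
     //   };
     //
     //   IsOdd eval_f;
     //   NoOp del_f;
     //   table->bulk_delete(Thread::current(), eval_f, del_f);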
1182 
1183 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1184 template <typename VALUE_SIZE_FUNC>
1185 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1186   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1187                 outputStream* st, const char* table_name)
1188 {
1189   NumberSeq summary;
1190   size_t literal_bytes = 0;
1191   if (!try_resize_lock(thread)) {
1192     st->print_cr("statistics unavailable at this moment");
1193     return;
1194   }
1195 
1196   InternalTable* table = get_table();
1197   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1198     ScopedCS cs(thread, this);
1199     size_t count = 0;
1200     Bucket* bucket = table->get_bucket(bucket_it);
1201     if (bucket->have_redirect() || bucket->is_locked()) {
1202         continue;
1203     }
1204     Node* current_node = bucket->first();
1205     while (current_node != NULL) {
1206       ++count;
1207       literal_bytes += vs_f(current_node->value());
1208       current_node = current_node->next();
1209     }
1210     summary.add((double)count);
1211   }
1212 
1213   double num_buckets = summary.num();
1214   double num_entries = summary.sum();
1215 
1216   size_t bucket_bytes = num_buckets * sizeof(Bucket);
1217   size_t entry_bytes  = num_entries * sizeof(Node);
1218   size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
1219 
1220   size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
1221   size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
1222 
1223   st->print_cr("%s statistics:", table_name);
1224   st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
1225                " bytes, each " SIZE_FORMAT,
1226                (size_t)num_buckets, bucket_bytes,  bucket_size);
1227   st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
1228                " bytes, each " SIZE_FORMAT,
1229                (size_t)num_entries, entry_bytes,   entry_size);
1230   if (literal_bytes != 0) {
1231     double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1232     st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
1233                  " bytes, avg %7.3f",
1234                  (size_t)num_entries, literal_bytes, literal_avg);
1235   }
1236   st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", ""
1237                , total_bytes);
1238   st->print_cr("Average bucket size     : %9.3f", summary.avg());
1239   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1240   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1241   st->print_cr("Maximum bucket size     : %9" PRIuPTR,
1242                (size_t)summary.maximum());
1243   unlock_resize_lock(thread);
1244 }
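
     // A VALUE_SIZE_FUNC returns the number of bytes a value owns beyond the node
     // itself (accumulated above as literal_bytes). A sketch for the IntTable
     // example, where values own no extra storage:
     //
     //   struct ZeroSize {
     //     size_t operator()(int* value) { return 0; }
     //   };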
1245 
1246 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1247 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1248   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1249 {
1250   if (!try_resize_lock(thread)) {
1251     return false;
1252   }
1253   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1254   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1255     Bucket* bucket = _table->get_bucket(bucket_it);
1256     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1257     while (bucket->first() != NULL) {
1258       Node* move_node = bucket->first();
1259       bool ok = bucket->cas_first(move_node->next(), move_node);
1260       assert(ok, "Uncontended cas must work");
1261       bool dead_hash = false;
1262       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1263       if (!dead_hash) {
1264         Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
1265         assert(!bucket->have_redirect() && !bucket->is_locked(), "No redirect or lock bit should be present");
1266         move_node->set_next(insert_bucket->first());
1267         ok = insert_bucket->cas_first(move_node, insert_bucket->first());
1268         assert(ok, "Uncontended cas must work");
1269       }
1270     }
1271   }
1272   unlock_resize_lock(thread);
1273   return true;
1274 }
1275 
1276 #endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP