/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5 = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

// Node
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Node::next() const
{
  return OrderAccess::load_acquire(&_next);
}

// Bucket
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first_raw() const
{
  return OrderAccess::load_acquire(&_first);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the pointer.
  return clear_state(OrderAccess::load_acquire(&_first));
}
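// The lock and redirect flags live in the two low (alignment-free) bits of
// _first; clear_state()/set_state()/is_state(), declared in
// concurrentHashTable.hpp, mask them in and out. A minimal sketch of the
// pointer-tagging idea, with hypothetical names, assuming nodes are at
// least 4-byte aligned:
//
//   const uintptr_t STATE_MASK = 0x3;     // the two low bits
//   void* tag(void* p, uintptr_t bit)     { return (void*)((uintptr_t)p | bit); }
//   void* untag(void* p)                  { return (void*)((uintptr_t)p & ~STATE_MASK); }
//   bool  has_bit(void* p, uintptr_t bit) { return ((uintptr_t)p & bit) != 0; }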
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here.
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding gives the CPU to another runnable
      // thread when no CPU is available for this one.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_last_node_next(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect)
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
    return true;
  }
  return false;
}
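// cas_first() is the building block for lock-free insertion: a writer reads
// the current head, links its new node in front of it, and publishes the
// node with a single CAS, which fails if the bucket is locked or the head
// has moved. A minimal sketch of that retry pattern (internal_insert()
// later in this file adds backoff, duplicate checks and cleanup):
//
//   Node* head;
//   do {
//     head = bucket->first();
//     new_node->set_next(head);
//   } while (!bucket->cas_first(new_node, head));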
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  OrderAccess::release_store(&_first, clear_state(first()));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  _buckets = new (memory) Bucket[_size];
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _thread(thread), _cht(cht)
{
  GlobalCounter::critical_section_begin(_thread);
  // This version is published now.
  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread);
}

// BaseConfig
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::allocate_node(size_t size, const VALUE& value)
{
  return AllocateHeap(size, F);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::free_node(void* memory, const VALUE& value)
{
  FreeHeap(memory);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}
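// ScopedCS above is an RAII guard for a GlobalCounter critical section:
// readers enter it before touching buckets so that writers can use
// GlobalCounter::write_synchronize() to wait until every reader that might
// still see an old version has left. A sketch of the read-side idiom this
// file relies on:
//
//   {
//     ScopedCS cs(thread, cht);   // reader becomes visible to writers
//     Node* node = bucket->first();
//     ...                         // nodes cannot be freed while in scope
//   }                             // destructor ends the critical section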
// HaveDeletables
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives around a 30% speedup.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once. May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent the below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We set this/next version that we are synchronizing on to not published.
  // A reader will zero this flag if it reads this/next version.
  OrderAccess::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}
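// The resize lock is deliberately two-level: the Mutex serializes access,
// while _resize_lock_owner records which thread logically owns the resize
// operation, because the mutex may be dropped around a safepoint while the
// logical operation is still in flight. A sketch of the acquire protocol
// used by try_resize_lock() above and lock_resize_lock() below:
//
//   if (mutex->try_lock()) {
//     if (owner != NULL) {
//       mutex->unlock();          // mutex free, but logically busy
//     } else {
//       owner = self;             // both levels held
//     }
//   }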
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chance that it is
  // returned quickly is low. So we will prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the
    // mutex might be unlocked while _resize_lock_owner still contains the
    // owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_table() const
{
  return OrderAccess::load_acquire(&_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_new_table() const
{
  return OrderAccess::load_acquire(&_new_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  OrderAccess::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table is not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}
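// When the table grows its size doubles, so each old bucket index splits
// into two siblings that differ only in the new top hash bit, which
// internal_grow_range() below exploits. A worked example, assuming an old
// size of 32 growing to 64:
//
//   size_t old_size = 32;
//   size_t even = hash & (old_size - 1);  // index in the old table, e.g. 5
//   size_t odd  = even + old_size;        // its new sibling, e.g. 37
//   size_t dest = hash & 63;              // new index: even or odd,
//                                         // depending on hash bit 5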
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will
  // be locked. We call the siblings even/odd, where even buckets have the
  // new high bit 0 and odd buckets have it 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Makes lockers go to the new table, where they will wait until the
    // unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into the corresponding
    // buckets in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the
      // bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  JFR_ONLY(_stats_rate.remove();)
  return true;
}
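// The LOOKUP_FUNC contract used above is get_hash() plus
// equals(VALUE*, bool* is_dead), where the functor may flag a value as dead
// so that callers can trigger cleaning. A minimal sketch for a hypothetical
// table of ints; the hash must agree with what CONFIG::get_hash() returns
// for the same value:
//
//   class IntLookup {
//     int _key;
//    public:
//     IntLookup(int key) : _key(key) {}
//     uintx get_hash() const { return (uintx)_key * 2654435761u; }
//     bool equals(int* value, bool* is_dead) {
//       *is_dead = false;              // ints never die in this example
//       return *value == _key;
//     }
//   };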
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we hold the resize lock, so the table is SMR safe, and there is no
  // new table. We can do this in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we handle the critical section manually, since we do not want to
  // take the cost of locking a bucket if there is nothing to delete. But we
  // can have concurrent single deletes. The _invisible_epoch can only be
  // used by the owner of _resize_lock, i.e. us here. Therefore we must not
  // change it on our own read-side.
  GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
      // Nothing to remove in this bucket.
      continue;
    }

    GlobalCounter::critical_section_end(thread);
    // We left the critical section, but the bucket cannot be removed while
    // we hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}
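// Every deletion path above follows the same safe-memory-reclamation order:
// unlink the node while the bucket is locked, synchronize so that all
// concurrent readers have left their critical sections, and only then free
// the node. The invariant, as a sketch:
//
//   bucket->lock();
//   ...                                  // unlink via release_assign_node_ptr()
//   bucket->unlock();
//   GlobalCounter::write_synchronize();  // wait out all readers
//   Node::destroy_node(node);            // no reader can reach it now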
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here.
  while(true) {
    {
      // We need a critical section to protect the table itself, but if we
      // fail we must leave the critical section, otherwise we would
      // deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding gives the CPU to another runnable
      // thread when no CPU is available for this one.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket, and in debug we poison the first ptr in the
    // bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to next.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is an even node, so move the odd list to aux's next.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list.
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is an odd node, so move the even list to aux's next.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list.
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move one pointer per synchronization, otherwise a reader
    // might be moved to the wrong chain, e.g. looking for an even hash
    // value but moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}
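// A worked unzip example: suppose the old chain is A(even) -> B(odd) ->
// C(even), and both new buckets initially share it. Visiting A moves the
// odd head past A to B; visiting B moves the even cursor (A's next) past B
// to C; visiting C moves the odd cursor (B's next) past C to NULL. The
// result is the even chain A -> C and the odd chain B, with one
// write_synchonize_on_visible_epoch() between each pointer move so that no
// reader is stranded on the wrong chain.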
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}
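// Shrink reverses grow without rehashing: the sibling chains at index i and
// i + new_size of the larger table differ only in the hash bit being
// dropped, so internal_shrink_range() above simply splices them into one
// chain. For example, when shrinking from 64 to 32 buckets, old bucket 37's
// chain is appended to old bucket 5's chain to form new bucket 5.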
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double checking of _size_limit_reached/
  // is_max_size_reached() on the grow path, since grow means high load on
  // the table while shrink means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}
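// The grow_hint out-parameter reports that a lookup walked a chain longer
// than _grow_hint, which callers typically feed back into grow(). A usage
// sketch, assuming the default size limit declared in the hpp file:
//
//   bool grow_hint = false;
//   cht->get(thread, lookup_f, found_f, &grow_hint);
//   if (grow_hint) {
//     cht->grow(thread);  // chains are long; try to double the table
//   }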
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                  CALLBACK_FUNC& callback, bool* grow_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  Node* new_node = NULL;
  uintx hash = lookup_f.get_hash();
  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);

      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        // No duplicate found.
        if (new_node == NULL) {
          new_node = Node::create_node(value_f(), first_at_start);
        } else {
          new_node->set_next(first_at_start);
        }
        if (bucket->cas_first(new_node, first_at_start)) {
          JFR_ONLY(_stats_rate.add();)
          callback(true, new_node->value());
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed; we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        callback(false, old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted; we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    assert(bucket->is_locked(), "Must be locked.");
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We could do a critical section over the entire loop, but that would
  // block updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_start_size(log2size),
      _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _stats_rate = TableRateStatistics();
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
              Monitor::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}
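// A CONFIG class supplies the hashing and allocation policy; the static
// interface used in this file is get_hash(value, is_dead*) and notfound(),
// with allocate_node()/free_node() inherited from BaseConfig. A minimal
// hypothetical sketch for a table of ints, for illustration only:
//
//   class IntConfig;
//   typedef ConcurrentHashTable<int, IntConfig, mtInternal> IntTable;
//   class IntConfig : public IntTable::BaseConfig {
//    public:
//     static uintx get_hash(const int& value, bool* is_dead) {
//       *is_dead = false;                    // ints are never dead
//       return (uintx)value * 2654435761u;   // any decent hash works
//     }
//     static int notfound() { return -1; }   // sentinel returned by get_copy()
//   };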
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
  get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  return val != NULL ? *val : CONFIG::notfound();
}
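// Usage sketch for the read paths, reusing the hypothetical IntTable,
// IntConfig and IntLookup from the sketches above: get() hands a VALUE* to
// a callback inside the critical section, while get_copy() returns a copy,
// or CONFIG::notfound() when the value is absent:
//
//   IntTable* cht = new IntTable(16, 20, 3);  // log2 size, log2 limit, grow hint
//   IntLookup lookup_f(42);
//   int v = cht->get_copy(thread, lookup_f);  // -1 (notfound()) when absent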
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  JFR_ONLY(_stats_rate.add();)
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}
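// For the bulk-delete paths above, EVALUATE_FUNC decides per value whether
// to delete, and DELETE_FUNC runs after the value has been unlinked and
// synchronized; the node itself is freed by the table. A hypothetical
// sketch that removes all even values from the IntTable above:
//
//   struct EvenEval {
//     bool operator()(int* value) { return (*value & 1) == 0; }
//   };
//   struct CountDel {
//     size_t _count;
//     CountDel() : _count(0) {}
//     void operator()(int* value) { _count++; } // release what *value owns here
//   };
//   EvenEval eval_f;
//   CountDel del_f;
//   cht->bulk_delete(thread, eval_f, del_f);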
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
{
  if (!try_resize_lock(thread)) {
    return old;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  return ts;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  ts.print(st, table_name);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(),
               "No redirect or lock bit should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // include guard