/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5 = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

// Node
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Node::next() const
{
  return OrderAccess::load_acquire(&_next);
}

// Bucket
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first_raw() const
{
  return OrderAccess::load_acquire(&_first);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the pointer.
  return clear_state(OrderAccess::load_acquire(&_first));
}
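
// Illustrative note: the two low bits of _first carry bucket state (the
// STATE_LOCK_BIT and STATE_REDIRECT_BIT masks used throughout this file),
// which is why POISON_PTR above keeps its two low bits clear. A sketch of
// the accessors' contract:
//
//   Node* raw    = bucket->first_raw();  // may carry state bits, do not deref
//   Node* node   = bucket->first();      // state bits stripped, safe to deref
//   bool  locked = bucket->is_locked();  // tests STATE_LOCK_BIT in raw value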

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_last_node_next(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect)
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
    return true;
  }
  return false;
}
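
// Illustrative note: cas_first() fails while the bucket is locked even
// without the is_locked() fast path, since a locked bucket carries
// STATE_LOCK_BIT in _first and the raw value can never equal the stripped
// pointer callers pass as 'expect'. A minimal lock-free push sketch:
//
//   Node* first = bucket->first();
//   Node* n = Node::create_node(value, first);
//   if (!bucket->cas_first(n, first)) {
//     // Lost a race with a concurrent insert, remove, lock or resize: retry.
//   }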

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  OrderAccess::release_store(&_first, clear_state(first()));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  _buckets = new (memory) Bucket[_size];
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _thread(thread), _cht(cht)
{
  GlobalCounter::critical_section_begin(_thread);
  // This version is published now.
  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread);
}

// BaseConfig
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::allocate_node(size_t size, const VALUE& value)
{
  return AllocateHeap(size, F);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::free_node(void* memory, const VALUE& value)
{
  FreeHeap(memory);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}
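
// Illustrative usage sketch: MultiGetHandle derives from ScopedCS, so several
// lookups can share a single GlobalCounter critical section. The returned
// VALUE* pointers are only safe to use while the handle is in scope:
//
//   {
//     MultiGetHandle mgh(thread, cht);
//     VALUE* a = mgh.get(lookup_a);
//     VALUE* b = mgh.get(lookup_b);
//     // use a and b here; they may be deleted once mgh goes out of scope
//   }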

// HaveDeletables
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all nodes, doing this prefetch gives around a 30% gain.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    if (next->next() != NULL) {
      Prefetch::read(*next->next()->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent the below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We set this/next version that we are synchronizing on to not-published.
  // A reader will zero this flag if it reads this/next version.
  OrderAccess::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chances that it is
  // released quickly are low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the lock holder dropped the mutex for a safepoint, the mutex may be
    // unlocked while _resize_lock_owner still contains the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}
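
// Illustrative note: the resize lock is two-level. The Mutex serializes
// access, while _resize_lock_owner marks a logical hold that survives the
// Mutex being dropped for a safepoint. The bulk operations below follow this
// sketch:
//
//   if (try_resize_lock(thread)) {   // non-blocking; lock_resize_lock blocks
//     // ... grow, shrink, scan or bulk delete ...
//     unlock_resize_lock(thread);
//   }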

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_table() const
{
  return OrderAccess::load_acquire(&_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_new_table() const
{
  return OrderAccess::load_acquire(&_new_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  OrderAccess::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}
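
// Illustrative note: growing doubles the table, so each old bucket i splits
// into two sibling buckets in the new table: i (the "even" sibling) and
// i + old_size (the "odd" sibling). Which sibling a node lands in is decided
// by the extra hash bit the larger mask exposes. E.g. with old log2_size == 5
// (32 buckets), a node with hash 0x2b (0b101011) lives in old bucket 0x0b;
// after the grow to 64 buckets, bit 5 of the hash is 1, so it belongs in the
// odd sibling 0x2b, while a node with hash 0x0b (bit 5 == 0) stays in the
// even bucket 0x0b.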

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. I call the siblings odd/even, where the even sibling has high
  // bit 0 and the odd sibling has high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Lockers are moved to the new table, where they will wait until
    // unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into corresponding
    // buckets in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the
      // bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  return true;
}
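
// Illustrative usage sketch for internal_remove(), which backs the public
// remove(); MyValue, MyLookup and the delete functor are hypothetical
// caller-side types:
//
//   struct MyDelete {
//     void operator()(MyValue** value) {
//       // Runs after write_synchronize(): no reader can still see *value.
//       delete *value;
//     }
//   } del_f;
//   MyLookup lookup(key, hash);
//   bool removed = cht->remove(thread, lookup, del_f);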

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f,
                            bool is_mt)
{
  // Here we have the resize lock, so the table is SMR safe, and there is no
  // new table. This can be done in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we do the critical section manually, since we don't want to take
  // the cost of locking the bucket if there is nothing to delete. But we can
  // have concurrent single deletes. The _invisible_epoch can only be used by
  // the owner of _resize_lock, i.e. us here, thus we must not change it on
  // our own read-side.
  GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
      // Nothing to remove in this bucket.
      continue;
    }

    GlobalCounter::critical_section_end(thread);
    // We left the critical section, but the bucket cannot be removed while
    // we hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      rem_n = rem_n->next();
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}
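
// Illustrative note: the redirect bit is what lets readers run concurrently
// with a resize. A reader that lands in an old bucket whose
// STATE_REDIRECT_BIT is set simply re-hashes into the new table, as
// get_bucket() above does; the old bucket keeps its chain intact until the
// resizer has unzipped it, so no reader ever observes a torn chain.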

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself. But if we
      // fail we must leave the critical section, otherwise we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}
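
// Illustrative note: the 'loops' out-parameter of get_node() counts chain
// links traversed. internal_get() and internal_insert() below compare it
// against _grow_hint (a constructor parameter) to set *grow_hint, which is
// how callers learn that chains have become long enough that calling grow()
// is probably worthwhile.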

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the
    // bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    if (dead_hash) {
      delete_me = aux;
      // This item is dead; move both lists to the next node.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux->next());
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux->next());
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This node is even, so skip it in the odd chain.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux->next());
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This node is odd, so skip it in the even chain.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux->next());
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux->next();

    // We can only move one pointer at a time, otherwise a reader might be
    // moved to the wrong chain, e.g. looking for an even hash value but
    // getting moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}
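
// Illustrative note: shrinking is the inverse of the grow unzip. Sibling
// buckets i and i + new_size of the old table both map to bucket i of the
// new, half-sized table, so internal_shrink_range() above simply appends the
// odd sibling's chain to the even sibling's chain with
// release_assign_last_node_next(). No per-node work is needed, because every
// node of both chains already hashes to the joined bucket under the smaller
// mask.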

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double-checking of _size_limit_reached/
  // is_max_size_reached() on the grow path, since grow means high load on
  // the table while shrink means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}
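
// Illustrative sketch of the LOOKUP_FUNC contract assumed throughout this
// file, for a hypothetical pointer value type MyValue. get_hash() must be
// stable for the duration of the operation, and equals() may flag a
// matching-but-dying entry via is_dead:
//
//   class MyLookup : public StackObj {
//     const char* _key;
//     uintx _hash;
//    public:
//     MyLookup(const char* key, uintx hash) : _key(key), _hash(hash) {}
//     uintx get_hash() const { return _hash; }
//     bool equals(MyValue** value, bool* is_dead) {
//       *is_dead = (*value)->is_dead();   // hypothetical liveness test
//       return !*is_dead && (*value)->equals(_key);
//     }
//   };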

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                  CALLBACK_FUNC& callback, bool* grow_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked = false;
  size_t loops = 0;
  size_t i = 0;
  Node* new_node = NULL;
  uintx hash = lookup_f.get_hash();
  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);

      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        // No duplicate found.
        if (new_node == NULL) {
          new_node = Node::create_node(value_f(), first_at_start);
        } else {
          new_node->set_next(first_at_start);
        }
        if (bucket->cas_first(new_node, first_at_start)) {
          callback(true, new_node->value());
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed; we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        callback(false, old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted; we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    assert(bucket->is_locked(), "Must be locked.");
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We can do a critical section over the entire loop, but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      rem_n = rem_n->next();
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_start_size(log2size),
      _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
              Monitor::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}
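
// Illustrative usage sketch: constructing a table that starts at 2^16
// buckets, may grow to 2^24, and reports a grow hint once a lookup walks more
// than 4 links. MyConfig is a hypothetical CONFIG class deriving from
// BaseConfig and providing a static get_hash(const VALUE&, bool* dead_hash):
//
//   typedef ConcurrentHashTable<MyValue*, MyConfig, mtInternal> MyTable;
//   MyTable* table = new MyTable(16, 24, 4);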

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f,
      bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
  get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  return val != NULL ? *val : CONFIG::notfound();
}
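
// Illustrative usage sketch: get() hands the value to a caller-supplied
// functor inside the critical section, while get_copy() returns it by value
// (or CONFIG::notfound()), which is the safe choice when the value must
// outlive the lookup. Using the hypothetical MyLookup from above:
//
//   struct MyFound {
//     MyValue* _result;
//     MyFound() : _result(NULL) {}
//     void operator()(MyValue** value) { _result = *value; }
//   } found_f;
//   MyLookup lookup(key, hash);
//   bool hit = table->get(thread, lookup, found_f);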

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}
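
// Illustrative usage sketch: bulk_delete() removes every entry that the
// evaluate functor selects, then runs the delete functor after readers have
// been synchronized away. Hypothetical functors for the MyValue type above:
//
//   struct IsDead {
//     bool operator()(MyValue** value) { return (*value)->is_dead(); }
//   } eval_f;
//   struct Reclaim {
//     void operator()(MyValue** value) { delete *value; }
//   } del_f;
//   table->bulk_delete(thread, eval_f, del_f);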

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  double num_buckets = summary.num();
  double num_entries = summary.sum();

  size_t bucket_bytes = num_buckets * sizeof(Bucket);
  size_t entry_bytes  = num_entries * sizeof(Node);
  size_t total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_buckets, bucket_bytes, bucket_size);
  st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_entries, entry_bytes, entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
                 " bytes, avg %7.3f",
                 (size_t)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", "",
               total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9" PRIuPTR,
               (size_t)summary.maximum());
  unlock_resize_lock(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(),
           "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!insert_bucket->have_redirect() && !insert_bucket->is_locked(),
               "No state bits should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP