< prev index next >

src/hotspot/share/utilities/concurrentHashTable.inline.hpp

Print this page




 246 {
 247   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 248 }
 249 
 250 // HaveDeletables
 251 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 252 template <typename EVALUATE_FUNC>
 253 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 254   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 255                                                       EVALUATE_FUNC& eval_f,
 256                                                       Bucket* prefetch_bucket)
 257 {
 258   // Instantiated for pointer type (true), so we can use prefetch.
 259   // When visiting all Nodes doing this prefetch give around 30%.
 260   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
 261   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 262     if (pref != NULL) {
 263       Prefetch::read(*pref->value(), 0);
 264       pref = pref->next();
 265     }
 266     if (next->next() != NULL) {
 267       Prefetch::read(*next->next()->value(), 0);



 268     }
 269     if (eval_f(next->value())) {
 270       return true;
 271     }
 272   }
 273   return false;
 274 }
 275 
 276 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 277 template <bool b, typename EVALUATE_FUNC>
 278 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 279   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 280                                                    EVALUATE_FUNC& eval_f,
 281                                                    Bucket* preb)
 282 {
 283   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 284     if (eval_f(next->value())) {
 285       return true;
 286     }
 287   }


 529     }
 530     GlobalCounter::critical_section_begin(thread);
 531   }
 532   GlobalCounter::critical_section_end(thread);
 533 }
 534 
 535 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 536 template <typename LOOKUP_FUNC>
 537 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 538   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 539 {
 540   size_t dels = 0;
 541   Node* ndel[BULK_DELETE_LIMIT];
 542   Node* const volatile * rem_n_prev = bucket->first_ptr();
 543   Node* rem_n = bucket->first();
 544   while (rem_n != NULL) {
 545     bool is_dead = false;
 546     lookup_f.equals(rem_n->value(), &is_dead);
 547     if (is_dead) {
 548       ndel[dels++] = rem_n;
 549       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 550       rem_n = rem_n->next();

 551       if (dels == BULK_DELETE_LIMIT) {
 552         break;
 553       }
 554     } else {
 555       rem_n_prev = rem_n->next_ptr();
 556       rem_n = rem_n->next();
 557     }
 558   }
 559   if (dels > 0) {
 560     GlobalCounter::write_synchronize();
 561     for (size_t node_it = 0; node_it < dels; node_it++) {
 562       Node::destroy_node(ndel[node_it]);
 563       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 564     }
 565   }
 566 }
 567 
 568 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 569 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 570 ConcurrentHashTable<VALUE, CONFIG, F>::


 637 }
 638 
 639 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 640 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 641   unzip_bucket(Thread* thread, InternalTable* old_table,
 642                InternalTable* new_table, size_t even_index, size_t odd_index)
 643 {
 644   Node* aux = old_table->get_bucket(even_index)->first();
 645   if (aux == NULL) {
 646     // This is an empty bucket and in debug we poison first ptr in bucket.
 647     // Therefore we must make sure no readers are looking at this bucket.
 648     // If we don't do a write_synch here, caller must do it.
 649     return false;
 650   }
 651   Node* delete_me = NULL;
 652   Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
 653   Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
 654   while (aux != NULL) {
 655     bool dead_hash = false;
 656     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);

 657     if (dead_hash) {
 658       delete_me = aux;
 659       // This item is dead, move both list to next
 660       new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 661                                                                 aux->next());
 662       new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 663                                                                  aux->next());
 664     } else {
 665       size_t aux_index = bucket_idx_hash(new_table, aux_hash);
 666       if (aux_index == even_index) {
 667         // This is a even, so move odd to aux/even next
 668         new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
 669                                                                   aux->next());
 670         // Keep in even list
 671         even = aux->next_ptr();
 672       } else if (aux_index == odd_index) {
 673         // This is a odd, so move odd to aux/odd next
 674         new_table->get_bucket(even_index)->release_assign_node_ptr(even,
 675                                                                    aux->next());
 676         // Keep in odd list
 677         odd = aux->next_ptr();
 678       } else {
 679         fatal("aux_index does not match even or odd indices");
 680       }
 681     }
 682     aux = aux->next();
 683 
 684     // We can only move 1 pointer otherwise a reader might be moved to the wrong
 685     // chain. E.g. looking for even hash value but got moved to the odd bucket
 686     // chain.
 687     write_synchonize_on_visible_epoch(thread);
 688     if (delete_me != NULL) {
 689       Node::destroy_node(delete_me);
 690       delete_me = NULL;
 691     }
 692   }
 693   return true;
 694 }
 695 
 696 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 697 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 698   internal_shrink_prolog(Thread* thread, size_t log2_size)
 699 {
 700   if (!try_resize_lock(thread)) {
 701     return false;
 702   }


 959   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
 960     ScopedCS cs(thread, this);
 961     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
 962       break; /* ends critical section */
 963     }
 964   } /* ends critical section */
 965 }
 966 
 967 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 968 template <typename EVALUATE_FUNC>
 969 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
 970   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
 971                      size_t num_del, Node** ndel)
 972 {
 973   size_t dels = 0;
 974   Node* const volatile * rem_n_prev = bucket->first_ptr();
 975   Node* rem_n = bucket->first();
 976   while (rem_n != NULL) {
 977     if (eval_f(rem_n->value())) {
 978       ndel[dels++] = rem_n;
 979       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 980       rem_n = rem_n->next();

 981       if (dels == num_del) {
 982         break;
 983       }
 984     } else {
 985       rem_n_prev = rem_n->next_ptr();
 986       rem_n = rem_n->next();
 987     }
 988   }
 989   return dels;
 990 }
 991 
 992 // Constructor
 993 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 994 inline ConcurrentHashTable<VALUE, CONFIG, F>::
 995   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
 996     : _new_table(NULL), _log2_start_size(log2size),
 997        _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
 998        _size_limit_reached(false), _resize_lock_owner(NULL),
 999        _invisible_epoch(0)
1000 {




 246 {
 247   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 248 }
 249 
// HaveDeletables
// Returns true if any node in the bucket satisfies eval_f.  The
// prefetch_bucket, if non-NULL, is walked in lock step so its values are
// pulled into cache before the caller visits that bucket next.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes doing this prefetch gives around 30%.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once.  May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}
 278 
 279 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 280 template <bool b, typename EVALUATE_FUNC>
 281 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 282   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 283                                                    EVALUATE_FUNC& eval_f,
 284                                                    Bucket* preb)
 285 {
 286   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 287     if (eval_f(next->value())) {
 288       return true;
 289     }
 290   }


 532     }
 533     GlobalCounter::critical_section_begin(thread);
 534   }
 535   GlobalCounter::critical_section_end(thread);
 536 }
 537 
// Unlinks and frees all nodes in the bucket that lookup_f reports as dead,
// up to BULK_DELETE_LIMIT nodes per call.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      // Read the next pointer once, so the successor published into the
      // chain is the same node the scan continues from.
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    // Wait for any concurrent readers' critical sections to end before
    // freeing the unlinked nodes.
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}
 571 
 572 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 573 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 574 ConcurrentHashTable<VALUE, CONFIG, F>::


 641 }
 642 
// Splits the chain in old_table's even_index bucket onto the even/odd
// buckets of new_table, one node per visible epoch.  Returns false if the
// old bucket was empty (caller must then handle synchronization itself).
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison first ptr in bucket.
    // Therefore we must make sure no readers are looking at this bucket.
    // If we don't do a write_synch here, caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    // Read the next pointer once: both chains must be advanced to the very
    // same successor node.
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both list to next
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is a even, so move odd to aux/even next
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is a odd, so move odd to aux/odd next
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move 1 pointer otherwise a reader might be moved to the wrong
    // chain. E.g. looking for even hash value but got moved to the odd bucket
    // chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}
 700 
 701 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 702 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 703   internal_shrink_prolog(Thread* thread, size_t log2_size)
 704 {
 705   if (!try_resize_lock(thread)) {
 706     return false;
 707   }


 964   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
 965     ScopedCS cs(thread, this);
 966     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
 967       break; /* ends critical section */
 968     }
 969   } /* ends critical section */
 970 }
 971 
// Unlinks up to num_del nodes matching eval_f from the bucket, recording
// them in ndel.  Returns the number of nodes unlinked; caller is
// responsible for synchronizing and freeing them.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      // Read the next pointer once, so the successor published into the
      // chain is the same node the scan continues from.
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}
 997 
 998 // Constructor
 999 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1000 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1001   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
1002     : _new_table(NULL), _log2_start_size(log2size),
1003        _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
1004        _size_limit_reached(false), _resize_lock_owner(NULL),
1005        _invisible_epoch(0)
1006 {


< prev index next >