src/hotspot/share/utilities/concurrentHashTable.inline.hpp

 468   bool have_dead = false;
 469   while (rem_n != NULL) {
 470     if (lookup_f.equals(rem_n->value(), &have_dead)) {
 471       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 472       break;
 473     } else {
 474       rem_n_prev = rem_n->next_ptr();
 475       rem_n = rem_n->next();
 476     }
 477   }
 478 
 479   bucket->unlock();
 480 
 481   if (rem_n == NULL) {
 482     return false;
 483   }
 484   // Publish the deletion.
 485   GlobalCounter::write_synchronize();
 486   delete_f(rem_n->value());
 487   Node::destroy_node(rem_n);

 488   return true;
 489 }
 490 
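The unlink/synchronize/free order above is the load-bearing part of this remove
path: the node is detached while the bucket lock is held, write_synchronize()
waits out every reader that could still be traversing it, and only then are the
delete callback and destroy_node run. Below is a minimal, self-contained sketch
of that grace-period idea in portable C++; the names (g_epoch, reader_enter,
write_synchronize) are invented for illustration, and HotSpot's real
GlobalCounter is considerably more refined.

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // One announcement slot per reader thread (fixed at 8 for the sketch).
    // 0 = not reading; an odd value = reading, entered at that epoch.
    constexpr int kMaxReaders = 8;
    std::atomic<uint64_t> g_epoch{2};               // always even
    std::atomic<uint64_t> g_reader[kMaxReaders];

    void reader_enter(int id) {
      // Announce before touching shared nodes (seq_cst keeps this store
      // ordered ahead of the reader's list traversal).
      g_reader[id].store(g_epoch.load() + 1);       // odd = active
    }

    void reader_exit(int id) {
      g_reader[id].store(0);                        // even = inactive
    }

    // Called after a node has been unlinked. When this returns, no reader
    // that could have seen the node is still inside its critical section,
    // so the node may be freed.
    void write_synchronize() {
      uint64_t target = g_epoch.fetch_add(2) + 2;
      for (int i = 0; i < kMaxReaders; i++) {
        uint64_t v;
        while ((v = g_reader[i].load()) % 2 == 1 && v < target) {
          std::this_thread::yield();                // older-epoch reader active
        }
      }
    }

This ordering is also why delete_f(rem_n->value()) may run outside the bucket
lock: the unlink is already published, and the grace period guarantees that no
concurrent reader still holds the node.
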
 491 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 492 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 493 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 494   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
 495                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 496 {
 497   // Here we hold the resize lock, so the table is SMR safe and there is
 498   // no new table. This can be done in parallel if we want.
 499   assert((is_mt && _resize_lock_owner != NULL) ||
 500          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
 501   Node* ndel[BULK_DELETE_LIMIT];
 502   InternalTable* table = get_table();
 503   assert(start_idx < stop_idx, "Must be");
 504   assert(stop_idx <= _table->_size, "Must be");
 505   // Here we manually do the critical section, since we don't want to take the
 506   // cost of locking the bucket if there is nothing to delete. But we can have
 507   // concurrent single deletes. The _invisible_epoch can only be used by the


 516     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
 517         have_deletable(bucket, eval_f, prefetch_bucket)) {
 518         // Nothing to remove in this bucket.
 519         continue;
 520     }
 521 
 522     GlobalCounter::critical_section_end(thread, cs_context);
 523     // We left critical section but the bucket cannot be removed while we hold
 524     // the _resize_lock.
 525     bucket->lock();
 526     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
 527     bucket->unlock();
 528     if (is_mt) {
 529       GlobalCounter::write_synchronize();
 530     } else {
 531       write_synchonize_on_visible_epoch(thread);
 532     }
 533     for (size_t node_it = 0; node_it < nd; node_it++) {
 534       del_f(ndel[node_it]->value());
 535       Node::destroy_node(ndel[node_it]);

 536       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 537     }
 538     cs_context = GlobalCounter::critical_section_begin(thread);
 539   }
 540   GlobalCounter::critical_section_end(thread, cs_context);
 541 }
 542 
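Note the shape of the loop body above: each bucket is first probed from inside
a read-side critical section (have_deletable) and the bucket lock is taken only
when there is actually something to reclaim. A condensed sketch of that
two-phase scan, reusing the grace-period helpers from the sketch above; the
BUCKET type, unlink_matching and matches are hypothetical stand-ins:

    // Cheap read-side probe first; lock and re-scan only on a hit.
    template <typename BUCKET, typename PRED>
    size_t two_phase_delete(BUCKET* bucket, PRED& matches) {
      reader_enter(0);
      bool any = false;
      for (auto* n = bucket->first(); n != nullptr; n = n->next()) {
        if (matches(n->value())) { any = true; break; }
      }
      reader_exit(0);
      if (!any) {
        return 0;                      // common case: no lock, no writes
      }
      bucket->lock();                  // the chain may have changed, so the
      size_t removed = bucket->unlink_matching(matches);  // scan repeats here
      bucket->unlock();
      write_synchronize();             // grace period before freeing nodes
      return removed;
    }
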
 543 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 544 template <typename LOOKUP_FUNC>
 545 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 546   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 547 {
 548   assert(bucket->is_locked(), "Must be locked.");
 549 
 550   size_t dels = 0;
 551   Node* ndel[BULK_DELETE_LIMIT];
 552   Node* const volatile * rem_n_prev = bucket->first_ptr();
 553   Node* rem_n = bucket->first();
 554   while (rem_n != NULL) {
 555     bool is_dead = false;
 556     lookup_f.equals(rem_n->value(), &is_dead);
 557     if (is_dead) {
 558       ndel[dels++] = rem_n;
 559       Node* next_node = rem_n->next();
 560       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 561       rem_n = next_node;
 562       if (dels == BULK_DELETE_LIMIT) {
 563         break;
 564       }
 565     } else {
 566       rem_n_prev = rem_n->next_ptr();
 567       rem_n = rem_n->next();
 568     }
 569   }
 570   if (dels > 0) {
 571     GlobalCounter::write_synchronize();
 572     for (size_t node_it = 0; node_it < dels; node_it++) {
 573       Node::destroy_node(ndel[node_it]);

 574       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 575     }
 576   }
 577 }
 578 
 579 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 580 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 581 ConcurrentHashTable<VALUE, CONFIG, F>::
 582   get_bucket(uintx hash) const
 583 {
 584   InternalTable* table = get_table();
 585   Bucket* bucket = get_bucket_in(table, hash);
 586   if (bucket->have_redirect()) {
 587     table = get_new_table();
 588     bucket = get_bucket_in(table, hash);
 589   }
 590   return bucket;
 591 }
 592 
 593 template <typename VALUE, typename CONFIG, MEMFLAGS F>


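get_bucket above is the whole story of how lookups survive a concurrent
resize: a bucket whose contents have already been split into the new table
carries a redirect mark, and the reader simply hashes again into the new
table. A simplified model follows; the real table packs the redirect bit into
the bucket's first pointer, while here a plain atomic flag stands in:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct SketchBucket {
      std::atomic<bool> redirect{false};    // set once the bucket has moved
      // ... chain head, lock bit, etc.
    };

    struct SketchTable {
      SketchBucket* buckets;
      size_t        mask;                   // table size is a power of two
      SketchBucket* bucket_for(uint64_t hash) { return &buckets[hash & mask]; }
    };

    SketchBucket* lookup_bucket(SketchTable* table, SketchTable* new_table,
                                uint64_t hash) {
      SketchBucket* b = table->bucket_for(hash);
      if (b->redirect.load(std::memory_order_acquire)) {
        // Already rehashed: the (larger) new table now owns this hash.
        b = new_table->bucket_for(hash);
      }
      return b;
    }
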
 883   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
 884                   bool* grow_hint, bool* clean_hint)
 885 {
 886   bool ret = false;
 887   bool clean = false;
 888   bool locked;
 889   size_t loops = 0;
 890   size_t i = 0;
 891   uintx hash = lookup_f.get_hash();
 892   Node* new_node = Node::create_node(value, NULL);
 893 
 894   while (true) {
 895     {
 896       ScopedCS cs(thread, this); /* protects the table/bucket */
 897       Bucket* bucket = get_bucket(hash);
 898       Node* first_at_start = bucket->first();
 899       Node* old = get_node(bucket, lookup_f, &clean, &loops);
 900       if (old == NULL) {
 901         new_node->set_next(first_at_start);
 902         if (bucket->cas_first(new_node, first_at_start)) {

 903           new_node = NULL;
 904           ret = true;
 905           break; /* leave critical section */
 906         }
 907         // CAS failed; we must leave the critical section and retry.
 908         locked = bucket->is_locked();
 909       } else {
 910         // There is a duplicate.
 911         break; /* leave critical section */
 912       }
 913     } /* leave critical section */
 914     i++;
 915     if (locked) {
 916       os::naked_yield();
 917     } else {
 918       SpinPause();
 919     }
 920   }
 921 
 922   if (new_node != NULL) {


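The retry loop above is a textbook lock-free push: read the head, link the new
node in front of it, and cas_first the head over; on failure, leave the
critical section, back off (naked_yield if a writer holds the bucket lock,
SpinPause otherwise) and start over, re-running the duplicate check. The core
step in miniature, with sketch types:

    #include <atomic>

    struct SNode {
      int    value;
      SNode* next;
    };

    // One push attempt; the caller loops, re-checking for duplicates between
    // attempts exactly as internal_insert re-runs get_node().
    bool cas_push(std::atomic<SNode*>& head, SNode* n) {
      SNode* first = head.load(std::memory_order_acquire);
      n->next = first;                              // link before publishing
      return head.compare_exchange_strong(first, n,
                                          std::memory_order_release);
    }
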
 991       if (dels == num_del) {
 992         break;
 993       }
 994     } else {
 995       rem_n_prev = rem_n->next_ptr();
 996       rem_n = rem_n->next();
 997     }
 998   }
 999   return dels;
1000 }
1001 
1002 // Constructor
1003 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1004 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1005   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
1006     : _new_table(NULL), _log2_size_limit(log2size_limit),
1007        _log2_start_size(log2size), _grow_hint(grow_hint),
1008        _size_limit_reached(false), _resize_lock_owner(NULL),
1009        _invisible_epoch(0)
1010 {

1011   _resize_lock =
1012     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
1013               Monitor::_safepoint_check_never);
1014   _table = new InternalTable(log2size);
1015   assert(log2size_limit >= log2size, "bad ergo");
1016   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1017 }
1018 
1019 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1020 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1021   ~ConcurrentHashTable()
1022 {
1023   delete _resize_lock;
1024   free_nodes();
1025   delete _table;
1026 }
1027 
1028 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1029 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
1030   get_size_log2(Thread* thread)


1064   }
1065   return ret;
1066 }
1067 
1068 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1069 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1070   unsafe_insert(const VALUE& value) {
1071   bool dead_hash = false;
1072   size_t hash = CONFIG::get_hash(value, &dead_hash);
1073   if (dead_hash) {
1074     return false;
1075   }
1076   // This is an unsafe operation.
1077   InternalTable* table = get_table();
1078   Bucket* bucket = get_bucket_in(table, hash);
1079   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1080   Node* new_node = Node::create_node(value, bucket->first());
1081   if (!bucket->cas_first(new_node, bucket->first())) {
1082     assert(false, "bad");
1083   }

1084   return true;
1085 }
1086 
1087 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1088 template <typename SCAN_FUNC>
1089 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1090   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1091 {
1092   if (!try_resize_lock(thread)) {
1093     return false;
1094   }
1095   do_scan_locked(thread, scan_f);
1096   unlock_resize_lock(thread);
1097   return true;
1098 }
1099 
1100 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1101 template <typename SCAN_FUNC>
1102 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1103   do_scan(Thread* thread, SCAN_FUNC& scan_f)


1165   do_bulk_delete_locked(thread, eval_f, del_f);
1166   unlock_resize_lock(thread);
1167   assert(_resize_lock_owner != thread, "Re-size lock held");
1168   return true;
1169 }
1170 
1171 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1172 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1173 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1174   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1175 {
1176   assert(!SafepointSynchronize::is_at_safepoint(),
1177          "must be outside a safepoint");
1178   lock_resize_lock(thread);
1179   do_bulk_delete_locked(thread, eval_f, del_f);
1180   unlock_resize_lock(thread);
1181 }
1182 
1183 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1184 template <typename VALUE_SIZE_FUNC>
1185 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1186   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1187                 outputStream* st, const char* table_name)
1188 {
1189   NumberSeq summary;
1190   size_t literal_bytes = 0;
1191   if (!try_resize_lock(thread)) {
1192     st->print_cr("statistics unavailable at this moment");
1193     return;
1194   }
1195 
1196   InternalTable* table = get_table();
1197   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1198     ScopedCS cs(thread, this);
1199     size_t count = 0;
1200     Bucket* bucket = table->get_bucket(bucket_it);
1201     if (bucket->have_redirect() || bucket->is_locked()) {
1202         continue;
1203     }
1204     Node* current_node = bucket->first();
1205     while (current_node != NULL) {
1206       ++count;
1207       literal_bytes += vs_f(current_node->value());
1208       current_node = current_node->next();
1209     }
1210     summary.add((double)count);
1211   }
1212 
1213   double num_buckets = summary.num();
1214   double num_entries = summary.sum();

1215 
1216   size_t bucket_bytes = num_buckets * sizeof(Bucket);
1217   size_t entry_bytes  = num_entries * sizeof(Node);
1218   size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
1219 
1220   size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
1221   size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
1222 
1223   st->print_cr("%s statistics:", table_name);
1224   st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
1225                " bytes, each " SIZE_FORMAT,
1226                (size_t)num_buckets, bucket_bytes,  bucket_size);
1227   st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
1228                " bytes, each " SIZE_FORMAT,
1229                (size_t)num_entries, entry_bytes,   entry_size);
1230   if (literal_bytes != 0) {
1231     double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1232     st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
1233                  " bytes, avg %7.3f",
1234                  (size_t)num_entries, literal_bytes, literal_avg);
1235   }
1236   st->print_cr("Total footprsize_t         : %9s = %9" PRIuPTR " bytes", ""
1237                , total_bytes);
1238   st->print_cr("Average bucket size     : %9.3f", summary.avg());
1239   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1240   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1241   st->print_cr("Maximum bucket size     : %9" PRIuPTR,
1242                (size_t)summary.maximum());
1243   unlock_resize_lock(thread);


1244 }
1245 
1246 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1247 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1248   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1249 {
1250   if (!try_resize_lock(thread)) {
1251     return false;
1252   }
1253   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1254   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1255     Bucket* bucket = _table->get_bucket(bucket_it);
1256     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1257     while (bucket->first() != NULL) {
1258       Node* move_node = bucket->first();
1259       bool ok = bucket->cas_first(move_node->next(), move_node);
1260       assert(ok, "Uncontended cas must work");
1261       bool dead_hash = false;
1262       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1263       if (!dead_hash) {

src/hotspot/share/utilities/concurrentHashTable.inline.hpp (updated)

 468   bool have_dead = false;
 469   while (rem_n != NULL) {
 470     if (lookup_f.equals(rem_n->value(), &have_dead)) {
 471       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
 472       break;
 473     } else {
 474       rem_n_prev = rem_n->next_ptr();
 475       rem_n = rem_n->next();
 476     }
 477   }
 478 
 479   bucket->unlock();
 480 
 481   if (rem_n == NULL) {
 482     return false;
 483   }
 484   // Publish the deletion.
 485   GlobalCounter::write_synchronize();
 486   delete_f(rem_n->value());
 487   Node::destroy_node(rem_n);
 488   JFR_ONLY(_stats_rate.remove();)
 489   return true;
 490 }
 491 
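New in this version of the file: every successful insert or remove also ticks
_stats_rate, compiled in only for JFR builds via the JFR_ONLY macro. The real
TableRateStatistics type is not shown in this hunk; a plausible minimal shape,
assuming nothing beyond the add()/remove() calls visible here:

    #include <atomic>
    #include <cstddef>

    // Hypothetical counter with the add()/remove() surface used above.
    class SketchRateStatistics {
      std::atomic<size_t> _added{0};
      std::atomic<size_t> _removed{0};
     public:
      void add()    { _added.fetch_add(1, std::memory_order_relaxed); }
      void remove() { _removed.fetch_add(1, std::memory_order_relaxed); }
      size_t live() const {
        return _added.load(std::memory_order_relaxed) -
               _removed.load(std::memory_order_relaxed);
      }
    };
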
 492 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 493 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 494 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 495   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
 496                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 497 {
 498   // Here we hold the resize lock, so the table is SMR safe and there is
 499   // no new table. This can be done in parallel if we want.
 500   assert((is_mt && _resize_lock_owner != NULL) ||
 501          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
 502   Node* ndel[BULK_DELETE_LIMIT];
 503   InternalTable* table = get_table();
 504   assert(start_idx < stop_idx, "Must be");
 505   assert(stop_idx <= _table->_size, "Must be");
 506   // Here we manually do the critical section, since we don't want to take the
 507   // cost of locking the bucket if there is nothing to delete. But we can have
 508   // concurrent single deletes. The _invisible_epoch can only be used by the


 517     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
 518         have_deletable(bucket, eval_f, prefetch_bucket)) {
 519         // Nothing to remove in this bucket.
 520         continue;
 521     }
 522 
 523     GlobalCounter::critical_section_end(thread, cs_context);
 524     // We left critical section but the bucket cannot be removed while we hold
 525     // the _resize_lock.
 526     bucket->lock();
 527     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
 528     bucket->unlock();
 529     if (is_mt) {
 530       GlobalCounter::write_synchronize();
 531     } else {
 532       write_synchonize_on_visible_epoch(thread);
 533     }
 534     for (size_t node_it = 0; node_it < nd; node_it++) {
 535       del_f(ndel[node_it]->value());
 536       Node::destroy_node(ndel[node_it]);
 537       JFR_ONLY(_stats_rate.remove();)
 538       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 539     }
 540     cs_context = GlobalCounter::critical_section_begin(thread);
 541   }
 542   GlobalCounter::critical_section_end(thread, cs_context);
 543 }
 544 
 545 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 546 template <typename LOOKUP_FUNC>
 547 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 548   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 549 {
 550   assert(bucket->is_locked(), "Must be locked.");
 551 
 552   size_t dels = 0;
 553   Node* ndel[BULK_DELETE_LIMIT];
 554   Node* const volatile * rem_n_prev = bucket->first_ptr();
 555   Node* rem_n = bucket->first();
 556   while (rem_n != NULL) {
 557     bool is_dead = false;
 558     lookup_f.equals(rem_n->value(), &is_dead);
 559     if (is_dead) {
 560       ndel[dels++] = rem_n;
 561       Node* next_node = rem_n->next();
 562       bucket->release_assign_node_ptr(rem_n_prev, next_node);
 563       rem_n = next_node;
 564       if (dels == BULK_DELETE_LIMIT) {
 565         break;
 566       }
 567     } else {
 568       rem_n_prev = rem_n->next_ptr();
 569       rem_n = rem_n->next();
 570     }
 571   }
 572   if (dels > 0) {
 573     GlobalCounter::write_synchronize();
 574     for (size_t node_it = 0; node_it < dels; node_it++) {
 575       Node::destroy_node(ndel[node_it]);
 576       JFR_ONLY(_stats_rate.remove();)
 577       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
 578     }
 579   }
 580 }
 581 
 582 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 583 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
 584 ConcurrentHashTable<VALUE, CONFIG, F>::
 585   get_bucket(uintx hash) const
 586 {
 587   InternalTable* table = get_table();
 588   Bucket* bucket = get_bucket_in(table, hash);
 589   if (bucket->have_redirect()) {
 590     table = get_new_table();
 591     bucket = get_bucket_in(table, hash);
 592   }
 593   return bucket;
 594 }
 595 
 596 template <typename VALUE, typename CONFIG, MEMFLAGS F>


 886   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
 887                   bool* grow_hint, bool* clean_hint)
 888 {
 889   bool ret = false;
 890   bool clean = false;
 891   bool locked;
 892   size_t loops = 0;
 893   size_t i = 0;
 894   uintx hash = lookup_f.get_hash();
 895   Node* new_node = Node::create_node(value, NULL);
 896 
 897   while (true) {
 898     {
 899       ScopedCS cs(thread, this); /* protects the table/bucket */
 900       Bucket* bucket = get_bucket(hash);
 901       Node* first_at_start = bucket->first();
 902       Node* old = get_node(bucket, lookup_f, &clean, &loops);
 903       if (old == NULL) {
 904         new_node->set_next(first_at_start);
 905         if (bucket->cas_first(new_node, first_at_start)) {
 906           JFR_ONLY(_stats_rate.add();)
 907           new_node = NULL;
 908           ret = true;
 909           break; /* leave critical section */
 910         }
 911         // CAS failed; we must leave the critical section and retry.
 912         locked = bucket->is_locked();
 913       } else {
 914         // There is a duplicate.
 915         break; /* leave critical section */
 916       }
 917     } /* leave critical section */
 918     i++;
 919     if (locked) {
 920       os::naked_yield();
 921     } else {
 922       SpinPause();
 923     }
 924   }
 925 
 926   if (new_node != NULL) {


 995       if (dels == num_del) {
 996         break;
 997       }
 998     } else {
 999       rem_n_prev = rem_n->next_ptr();
1000       rem_n = rem_n->next();
1001     }
1002   }
1003   return dels;
1004 }
1005 
1006 // Constructor
1007 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1008 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1009   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
1010     : _new_table(NULL), _log2_size_limit(log2size_limit),
1011        _log2_start_size(log2size), _grow_hint(grow_hint),
1012        _size_limit_reached(false), _resize_lock_owner(NULL),
1013        _invisible_epoch(0)
1014 {
1015   _stats_rate = TableRateStatistics();
1016   _resize_lock =
1017     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
1018               Monitor::_safepoint_check_never);
1019   _table = new InternalTable(log2size);
1020   assert(log2size_limit >= log2size, "bad ergo");
1021   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1022 }
1023 
1024 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1025 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1026   ~ConcurrentHashTable()
1027 {
1028   delete _resize_lock;
1029   free_nodes();
1030   delete _table;
1031 }
1032 
1033 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1034 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
1035   get_size_log2(Thread* thread)


1069   }
1070   return ret;
1071 }
1072 
1073 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1074 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1075   unsafe_insert(const VALUE& value) {
1076   bool dead_hash = false;
1077   size_t hash = CONFIG::get_hash(value, &dead_hash);
1078   if (dead_hash) {
1079     return false;
1080   }
1081   // This is an unsafe operation.
1082   InternalTable* table = get_table();
1083   Bucket* bucket = get_bucket_in(table, hash);
1084   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1085   Node* new_node = Node::create_node(value, bucket->first());
1086   if (!bucket->cas_first(new_node, bucket->first())) {
1087     assert(false, "bad");
1088   }
1089   JFR_ONLY(_stats_rate.add();)
1090   return true;
1091 }
1092 
1093 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1094 template <typename SCAN_FUNC>
1095 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1096   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1097 {
1098   if (!try_resize_lock(thread)) {
1099     return false;
1100   }
1101   do_scan_locked(thread, scan_f);
1102   unlock_resize_lock(thread);
1103   return true;
1104 }
1105 
1106 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1107 template <typename SCAN_FUNC>
1108 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1109   do_scan(Thread* thread, SCAN_FUNC& scan_f)


1171   do_bulk_delete_locked(thread, eval_f, del_f);
1172   unlock_resize_lock(thread);
1173   assert(_resize_lock_owner != thread, "Re-size lock held");
1174   return true;
1175 }
1176 
1177 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1178 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1179 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1180   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1181 {
1182   assert(!SafepointSynchronize::is_at_safepoint(),
1183          "must be outside a safepoint");
1184   lock_resize_lock(thread);
1185   do_bulk_delete_locked(thread, eval_f, del_f);
1186   unlock_resize_lock(thread);
1187 }
1188 
1189 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1190 template <typename VALUE_SIZE_FUNC>
1191 inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
1192   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)

1193 {
1194   NumberSeq summary;
1195   size_t literal_bytes = 0;

1196   InternalTable* table = get_table();
1197   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1198     ScopedCS cs(thread, this);
1199     size_t count = 0;
1200     Bucket* bucket = table->get_bucket(bucket_it);
1201     if (bucket->have_redirect() || bucket->is_locked()) {
1202       continue;
1203     }
1204     Node* current_node = bucket->first();
1205     while (current_node != NULL) {
1206       ++count;
1207       literal_bytes += vs_f(current_node->value());
1208       current_node = current_node->next();
1209     }
1210     summary.add((double)count);
1211   }
1212 
1213   return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
1214 }
1215 
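statistics_calculate now only gathers raw material: one chain-length sample
per bucket into the NumberSeq, plus the running literal byte total, leaving
all derived numbers to TableStatistics. A self-contained stand-in for that
accumulation (whether the real NumberSeq divides by n or n-1 for the variance
is not visible in this hunk):

    #include <cmath>

    // Count, sum and sum of squares are enough to recover the average,
    // variance and standard deviation of the per-bucket chain lengths.
    struct SeqSketch {
      double n = 0, sum = 0, sum_sq = 0, max = 0;
      void add(double v) {
        n += 1; sum += v; sum_sq += v * v;
        if (v > max) max = v;
      }
      double avg() const      { return n > 0 ? sum / n : 0.0; }
      double variance() const {
        return n > 1 ? (sum_sq - sum * sum / n) / (n - 1) : 0.0;
      }
      double sd() const       { return std::sqrt(variance()); }
    };

    // Footprint, as the old statistics_to computed inline:
    //   total = literal_bytes + n_buckets * sizeof(Bucket)
    //                         + n_entries * sizeof(Node)
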
1216 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1217 template <typename VALUE_SIZE_FUNC>
1218 inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
1219   statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
1220 {
1221   if (!try_resize_lock(thread)) {
1222     return old;
1223   }
1224 
1225   TableStatistics ts = statistics_calculate(thread, vs_f);
1226   unlock_resize_lock(thread);
1227 
1228   return ts;
1229 }
1230 
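statistics_get is deliberately lossy: when the resize lock is contended it
returns the caller's previous snapshot instead of blocking. Callers are thus
expected to keep the last TableStatistics around and feed it back in; a hedged
usage sketch (the caller-side class and names are invented):

    // Hypothetical caller that tolerates a stale snapshot.
    class TableUserSketch {
      TableStatistics _last;           // returned again when the lock is busy
     public:
      template <typename CHT, typename SIZE_F>
      void sample(Thread* thread, CHT* cht, SIZE_F& vs_f) {
        _last = cht->statistics_get(thread, vs_f, _last);
        // _last may now be one resize behind; acceptable for monitoring.
      }
    };
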
1231 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1232 template <typename VALUE_SIZE_FUNC>
1233 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1234   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1235                 outputStream* st, const char* table_name)
1236 {
1237   if (!try_resize_lock(thread)) {
1238     st->print_cr("statistics unavailable at this moment");
1239     return;
1240   }
1241 
1242   TableStatistics ts = statistics_calculate(thread, vs_f);

1243   unlock_resize_lock(thread);
1244 
1245   ts.print(st, table_name);
1246 }
1247 
1248 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1249 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1250   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1251 {
1252   if (!try_resize_lock(thread)) {
1253     return false;
1254   }
1255   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1256   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1257     Bucket* bucket = _table->get_bucket(bucket_it);
1258     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1259     while (bucket->first() != NULL) {
1260       Node* move_node = bucket->first();
1261       bool ok = bucket->cas_first(move_node->next(), move_node);
1262       assert(ok, "Uncontended cas must work");
1263       bool dead_hash = false;
1264       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1265       if (!dead_hash) {