276 template <typename VALUE, typename CONFIG, MEMFLAGS F>
277 template <bool b, typename EVALUATE_FUNC>
278 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
279 HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
280 EVALUATE_FUNC& eval_f,
281 Bucket* preb)
282 {
283 for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
284 if (eval_f(next->value())) {
285 return true;
286 }
287 }
288 return false;
289 }
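// Note: this is the generic fallback; it scans the whole chain and ignores the
// prefetch bucket 'preb'. The selection via IsPointer<VALUE>::value below
// suggests a pointer specialization that presumably uses 'preb' to prefetch the
// next chain while this one is evaluated.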
290
291 // ConcurrentHashTable
292 template <typename VALUE, typename CONFIG, MEMFLAGS F>
293 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
294 write_synchonize_on_visible_epoch(Thread* thread)
295 {
296 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
297 OrderAccess::fence(); // Prevent below load from floating up.
298 // If no reader saw this version we can skip write_synchronize.
299 if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
300 return;
301 }
302 assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
303 // We mark the version we are synchronizing on as not yet published.
304 // A reader will zero this flag if it reads that version.
305 OrderAccess::release_store(&_invisible_epoch, thread);
306 GlobalCounter::write_synchronize();
307 }
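// Illustrative write-side pattern (a sketch based on the bulk-delete code
// below, not an additional API): unlink nodes while holding the bucket lock,
// wait out concurrent readers, then free the unlinked nodes.
//
//   bucket->lock();
//   ... unlink matching nodes from the chain ...
//   bucket->unlock();
//   write_synchonize_on_visible_epoch(thread); // no reader can still see them
//   ... destroy the unlinked nodes ...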
308
309 template <typename VALUE, typename CONFIG, MEMFLAGS F>
310 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
311 try_resize_lock(Thread* locker)
312 {
313 if (_resize_lock->try_lock()) {
314 if (_resize_lock_owner != NULL) {
315 assert(locker != _resize_lock_owner, "Already own lock");
316 // We got the mutex, but the internal state is locked.
471 bucket->unlock();
472
473 if (rem_n == NULL) {
474 return false;
475 }
476 // Publish the deletion.
477 GlobalCounter::write_synchronize();
478 delete_f(rem_n->value());
479 Node::destroy_node(rem_n);
480 return true;
481 }
482
483 template <typename VALUE, typename CONFIG, MEMFLAGS F>
484 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
485 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
486 do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
487 EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
488 {
489 // Here we hold the resize lock, so the table is SMR safe and there is no
490 // new table. This could be done in parallel if we wanted to.
491 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
492 Node* ndel[BULK_DELETE_LIMIT];
493 InternalTable* table = get_table();
494 assert(start_idx < stop_idx, "Must be");
495 assert(stop_idx <= _table->_size, "Must be");
496 // Here we enter the critical section manually, since we do not want to pay
497 // the cost of locking the bucket if there is nothing to delete. But there
498 // can be concurrent single deletes. The _invisible_epoch may only be used
499 // by the owner of _resize_lock, which is us here, so we must not change it
500 // from our own read-side.
501 GlobalCounter::critical_section_begin(thread);
502 for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
503 Bucket* bucket = _table->get_bucket(bucket_it);
504 Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
505 _table->get_bucket(bucket_it+1) : NULL;
506
507 if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
508 have_deletable(bucket, eval_f, prefetch_bucket)) {
509 // Nothing to remove in this bucket.
510 continue;
511 }
512
513 GlobalCounter::critical_section_end(thread);
514 // We have left the critical section, but the bucket cannot be removed
515 // while we hold the _resize_lock.
516 bucket->lock();
517 size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
518 bucket->unlock();
519 write_synchonize_on_visible_epoch(thread);
520 for (size_t node_it = 0; node_it < nd; node_it++) {
521 del_f(ndel[node_it]->value());
522 Node::destroy_node(ndel[node_it]);
523 DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
524 }
525 GlobalCounter::critical_section_begin(thread);
678
679 // We can only move one pointer at a time, otherwise a reader might be moved
680 // to the wrong chain, e.g. a reader looking for an even hash value could be
681 // moved onto the odd bucket chain.
682 write_synchonize_on_visible_epoch(thread);
683 if (delete_me != NULL) {
684 Node::destroy_node(delete_me);
685 delete_me = NULL;
686 }
687 }
688 return true;
689 }
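// To expand on the one-pointer rule above: when the table grows, each old
// chain is split onto two new buckets according to the extra hash bit (the
// "even" and "odd" chains). If more than one next-pointer were published per
// synchronization, a reader that started on the old chain could be carried
// onto the chain with the wrong hash bit.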
690
691 template <typename VALUE, typename CONFIG, MEMFLAGS F>
692 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
693 internal_shrink_prolog(Thread* thread, size_t log2_size)
694 {
695 if (!try_resize_lock(thread)) {
696 return false;
697 }
698
699 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
700
701 if (_table->_log2_size == _log2_start_size ||
702 _table->_log2_size <= log2_size) {
703 unlock_resize_lock(thread);
704 return false;
705 }
706
707 _new_table = new InternalTable(_table->_log2_size - 1);
708
709 return true;
710 }
711
712 template <typename VALUE, typename CONFIG, MEMFLAGS F>
713 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
714 internal_shrink_epilog(Thread* thread)
715 {
716 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
717 assert(_resize_lock_owner, "Should be locked");
718
719 InternalTable* old_table = set_table_from_new();
720 _size_limit_reached = false;
721 unlock_resize_lock(thread);
722 #ifdef ASSERT
723 for (size_t i = 0; i < old_table->_size; i++) {
724 assert(old_table->get_bucket(i)->first() == POISON_PTR,
725 "No poison found");
726 }
727 #endif
728 // ABA safe, old_table not visible to any other threads.
729 delete old_table;
730 }
731
732 template <typename VALUE, typename CONFIG, MEMFLAGS F>
733 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
734 internal_shrink_range(Thread* thread, size_t start, size_t stop)
735 {
736 // The bucket state is copied here as well.
737 // Hence all buckets in the new table will be locked.
754 b_old_even->redirect();
755 b_old_odd->redirect();
756
757 write_synchonize_on_visible_epoch(thread);
758
759 // Unlock for writes into new smaller table.
760 _new_table->get_bucket(bucket_it)->unlock();
761
762 DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
763 (Node*)POISON_PTR);)
764 DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
765 (Node*)POISON_PTR);)
766 }
767 }
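// At this point each pair of old buckets (even, odd) has been merged into a
// single chain in the half-sized new table. The old buckets are redirected so
// that late readers can be forwarded to the new table, and their first
// pointers are poisoned in debug builds.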
768
769 template <typename VALUE, typename CONFIG, MEMFLAGS F>
770 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
771 internal_shrink(Thread* thread, size_t log2_size)
772 {
773 if (!internal_shrink_prolog(thread, log2_size)) {
774 assert(!_resize_lock->owned_by_self(), "Re-size lock held");
775 return false;
776 }
777 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
778 assert(_resize_lock_owner == thread, "Should be locked by me");
779 internal_shrink_range(thread, 0, _new_table->_size);
780 internal_shrink_epilog(thread);
781 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
782 return true;
783 }
784
785 template <typename VALUE, typename CONFIG, MEMFLAGS F>
786 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
787 internal_grow_prolog(Thread* thread, size_t log2_size)
788 {
789 // We double-check _size_limit_reached/is_max_size_reached() only on the
790 // grow path, since growing implies a high load on the table
791 // while shrinking implies a low load.
792 if (is_max_size_reached()) {
793 return false;
794 }
795 if (!try_resize_lock(thread)) {
796 // Either we have an ongoing resize or an operation which doesn't want us
797 // to resize now.
798 return false;
799 }
800 if (is_max_size_reached() || _table->_log2_size >= log2_size) {
801 unlock_resize_lock(thread);
802 return false;
803 }
804
805 _new_table = new InternalTable(_table->_log2_size + 1);
806
807 if (_new_table->_log2_size == _log2_size_limit) {
808 _size_limit_reached = true;
809 }
810
811 return true;
812 }
813
814 template <typename VALUE, typename CONFIG, MEMFLAGS F>
815 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
816 internal_grow_epilog(Thread* thread)
817 {
818 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
819 assert(_resize_lock_owner, "Should be locked");
820
821 InternalTable* old_table = set_table_from_new();
822 unlock_resize_lock(thread);
823 #ifdef ASSERT
824 for (size_t i = 0; i < old_table->_size; i++) {
825 assert(old_table->get_bucket(i)->first() == POISON_PTR,
826 "No poison found");
827 }
828 #endif
829 // ABA safe, old_table not visible to any other threads.
830 delete old_table;
831 }
832
833 template <typename VALUE, typename CONFIG, MEMFLAGS F>
834 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
835 internal_grow(Thread* thread, size_t log2_size)
836 {
837 if (!internal_grow_prolog(thread, log2_size)) {
838 assert(!_resize_lock->owned_by_self(), "Re-size lock held");
839 return false;
840 }
841 assert(_resize_lock->owned_by_self(), "Re-size lock not held");
842 assert(_resize_lock_owner == thread, "Should be locked by me");
843 internal_grow_range(thread, 0, _table->_size);
844 internal_grow_epilog(thread);
845 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
846 return true;
847 }
848
849 // Always called within critical section
850 template <typename VALUE, typename CONFIG, MEMFLAGS F>
851 template <typename LOOKUP_FUNC>
852 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
853 internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
854 {
855 bool clean = false;
856 size_t loops = 0;
857 VALUE* ret = NULL;
858
859 const Bucket* bucket = get_bucket(lookup_f.get_hash());
860 Node* node = get_node(bucket, lookup_f, &clean, &loops);
861 if (node != NULL) {
862 ret = node->value();
863 }
864 if (grow_hint != NULL) {
865 *grow_hint = loops > _grow_hint;
938 template <typename VALUE, typename CONFIG, MEMFLAGS F>
939 template <typename FUNC>
940 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
941 visit_nodes(Bucket* bucket, FUNC& visitor_f)
942 {
943 Node* current_node = bucket->first();
944 while (current_node != NULL) {
945 if (!visitor_f(current_node->value())) {
946 return false;
947 }
948 current_node = current_node->next();
949 }
950 return true;
951 }
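// Example of a scan functor usable with visit_nodes/do_scan (illustrative
// only; CountNodes is a hypothetical name and VALUE stands for the table's
// value type). The functor is called with a VALUE* and returns false to stop
// the scan early:
//
//   struct CountNodes {
//     size_t _count;
//     CountNodes() : _count(0) {}
//     bool operator()(VALUE* value) {
//       _count++;
//       return true; // keep scanning
//     }
//   };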
952
953 template <typename VALUE, typename CONFIG, MEMFLAGS F>
954 template <typename FUNC>
955 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
956 do_scan_locked(Thread* thread, FUNC& scan_f)
957 {
958 assert(_resize_lock->owned_by_self() ||
959 (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
960 "Re-size lock not held or not VMThread at safepoint");
961 // We could hold one critical section over the entire loop, but that would
962 // block updates for a long time. Instead we choose to block resizes.
963 InternalTable* table = get_table();
964 for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
965 ScopedCS cs(thread, this);
966 if (!visit_nodes(_table->get_bucket(bucket_it), scan_f)) {
967 break; /* ends critical section */
968 }
969 } /* ends critical section */
970 }
971
972 template <typename VALUE, typename CONFIG, MEMFLAGS F>
973 template <typename EVALUATE_FUNC>
974 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
975 delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
976 size_t num_del, Node** ndel)
977 {
978 size_t dels = 0;
979 Node* const volatile * rem_n_prev = bucket->first_ptr();
980 Node* rem_n = bucket->first();
981 while (rem_n != NULL) {
982 if (eval_f(rem_n->value())) {
983 ndel[dels++] = rem_n;
984 bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
985 rem_n = rem_n->next();
986 if (dels == num_del) {
1077 size_t hash = CONFIG::get_hash(value, &dead_hash);
1078 if (dead_hash) {
1079 return false;
1080 }
1081 // This is an unsafe operation.
1082 InternalTable* table = get_table();
1083 Bucket* bucket = get_bucket_in(table, hash);
1084 assert(!bucket->have_redirect() && !bucket->is_locked(), "Bucket must not be redirected or locked");
1085 Node* new_node = Node::create_node(value, bucket->first());
1086 if (!bucket->cas_first(new_node, bucket->first())) {
1087 assert(false, "Uncontended cas_first should not fail");
1088 }
1089 return true;
1090 }
1091
1092 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1093 template <typename SCAN_FUNC>
1094 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1095 try_scan(Thread* thread, SCAN_FUNC& scan_f)
1096 {
1097 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1098 bool vm_and_safepoint = thread->is_VM_thread() &&
1099 SafepointSynchronize::is_at_safepoint();
1100 if (!vm_and_safepoint && !try_resize_lock(thread)) {
1101 return false;
1102 }
1103 do_scan_locked(thread, scan_f);
1104 if (!vm_and_safepoint) {
1105 unlock_resize_lock(thread);
1106 }
1107 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1108 return true;
1109 }
1110
1111 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1112 template <typename SCAN_FUNC>
1113 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1114 do_scan(Thread* thread, SCAN_FUNC& scan_f)
1115 {
1116 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1117 lock_resize_lock(thread);
1118 do_scan_locked(thread, scan_f);
1119 unlock_resize_lock(thread);
1120 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1121 }
1122
1123 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1124 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1125 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1126 try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1127 {
1128 if (!try_resize_lock(thread)) {
1129 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1130 return false;
1131 }
1132 do_bulk_delete_locked(thread, eval_f, del_f);
1133 unlock_resize_lock(thread);
1134 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1135 return true;
1136 }
1137
1138 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1139 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1140 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1141 bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1142 {
1143 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1144 lock_resize_lock(thread);
1145 do_bulk_delete_locked(thread, eval_f, del_f);
1146 unlock_resize_lock(thread);
1147 assert(!_resize_lock->owned_by_self(), "Re-size lock not held");
1148 }
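// Example functor pair for try_bulk_delete/bulk_delete (illustrative only;
// cht and is_stale() are hypothetical). eval_f selects the nodes to remove and
// is evaluated while readers may still traverse the chain; del_f is called
// once per removed value, but only after readers have been synchronized out:
//
//   auto eval_f = [](VALUE* value) { return value->is_stale(); };
//   auto del_f  = [](VALUE* value) { /* release resources owned by *value */ };
//   cht->bulk_delete(thread, eval_f, del_f);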
1149
1150 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1151 template <typename VALUE_SIZE_FUNC>
1152 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1153 statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1154 outputStream* st, const char* table_name)
1155 {
1156 NumberSeq summary;
1157 size_t literal_bytes = 0;
1158 if ((thread->is_VM_thread() && !SafepointSynchronize::is_at_safepoint()) ||
1159 (!thread->is_VM_thread() && !try_resize_lock(thread))) {
1160 st->print_cr("statistics unavailable at this moment");
1161 return;
1162 }
1163
1164 InternalTable* table = get_table();
1165 for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1166 ScopedCS cs(thread, this);
1167 size_t count = 0;
1168 Bucket* bucket = _table->get_bucket(bucket_it);
1169 if (bucket->have_redirect() || bucket->is_locked()) {
1170 continue;
1171 }
1172 Node* current_node = bucket->first();
1173 while (current_node != NULL) {
1174 ++count;
1175 literal_bytes += vs_f(current_node->value());
1176 current_node = current_node->next();
1177 }
1178 summary.add((double)count);
1179 }
1180
1181 double num_buckets = summary.num();
1182 double num_entries = summary.sum();
1183
1184 size_t bucket_bytes = num_buckets * sizeof(Bucket);
1185 size_t entry_bytes = num_entries * sizeof(Node);
1186 size_t total_bytes = literal_bytes + bucket_bytes + entry_bytes;
1187
1188 size_t bucket_size = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
1191 st->print_cr("%s statistics:", table_name);
1192 st->print_cr("Number of buckets : %9" PRIuPTR " = %9" PRIuPTR
1193 " bytes, each " SIZE_FORMAT,
1194 (size_t)num_buckets, bucket_bytes, bucket_size);
1195 st->print_cr("Number of entries : %9" PRIuPTR " = %9" PRIuPTR
1196 " bytes, each " SIZE_FORMAT,
1197 (size_t)num_entries, entry_bytes, entry_size);
1198 if (literal_bytes != 0) {
1199 double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1200 st->print_cr("Number of literals : %9" PRIuPTR " = %9" PRIuPTR
1201 " bytes, avg %7.3f",
1202 (size_t)num_entries, literal_bytes, literal_avg);
1203 }
1204 st->print_cr("Total footprint : %9s = %9" PRIuPTR " bytes", ""
1205 , total_bytes);
1206 st->print_cr("Average bucket size : %9.3f", summary.avg());
1207 st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1208 st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1209 st->print_cr("Maximum bucket size : %9" PRIuPTR,
1210 (size_t)summary.maximum());
1211 if (!thread->is_VM_thread()) {
1212 unlock_resize_lock(thread);
1213 }
1214 }
1215
1216 #endif // include guard
|
276 template <typename VALUE, typename CONFIG, MEMFLAGS F>
277 template <bool b, typename EVALUATE_FUNC>
278 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
279 HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
280 EVALUATE_FUNC& eval_f,
281 Bucket* preb)
282 {
283 for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
284 if (eval_f(next->value())) {
285 return true;
286 }
287 }
288 return false;
289 }
290
291 // ConcurrentHashTable
292 template <typename VALUE, typename CONFIG, MEMFLAGS F>
293 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
294 write_synchonize_on_visible_epoch(Thread* thread)
295 {
296 assert(_resize_lock_owner == thread, "Re-size lock not held");
297 OrderAccess::fence(); // Prevent below load from floating up.
298 // If no reader saw this version we can skip write_synchronize.
299 if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
300 return;
301 }
302 assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
303 // We mark the version we are synchronizing on as not yet published.
304 // A reader will zero this flag if it reads that version.
305 OrderAccess::release_store(&_invisible_epoch, thread);
306 GlobalCounter::write_synchronize();
307 }
308
309 template <typename VALUE, typename CONFIG, MEMFLAGS F>
310 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
311 try_resize_lock(Thread* locker)
312 {
313 if (_resize_lock->try_lock()) {
314 if (_resize_lock_owner != NULL) {
315 assert(locker != _resize_lock_owner, "Already own lock");
316 // We got the mutex, but the internal state is locked.
471 bucket->unlock();
472
473 if (rem_n == NULL) {
474 return false;
475 }
476 // Publish the deletion.
477 GlobalCounter::write_synchronize();
478 delete_f(rem_n->value());
479 Node::destroy_node(rem_n);
480 return true;
481 }
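// The pattern above (unlink under the bucket lock, write_synchronize, then
// destroy) guarantees that no reader inside a critical section can still hold
// a reference to rem_n when it is destroyed.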
482
483 template <typename VALUE, typename CONFIG, MEMFLAGS F>
484 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
485 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
486 do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
487 EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
488 {
489 // Here we hold the resize lock, so the table is SMR safe and there is no
490 // new table. This could be done in parallel if we wanted to.
491 assert(_resize_lock_owner == thread, "Re-size lock not held");
492 Node* ndel[BULK_DELETE_LIMIT];
493 InternalTable* table = get_table();
494 assert(start_idx < stop_idx, "Must be");
495 assert(stop_idx <= _table->_size, "Must be");
496 // Here we enter the critical section manually, since we do not want to pay
497 // the cost of locking the bucket if there is nothing to delete. But there
498 // can be concurrent single deletes. The _invisible_epoch may only be used
499 // by the owner of _resize_lock, which is us here, so we must not change it
500 // from our own read-side.
501 GlobalCounter::critical_section_begin(thread);
502 for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
503 Bucket* bucket = table->get_bucket(bucket_it);
504 Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
505 table->get_bucket(bucket_it+1) : NULL;
506
507 if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
508 have_deletable(bucket, eval_f, prefetch_bucket)) {
509 // Nothing to remove in this bucket.
510 continue;
511 }
512
513 GlobalCounter::critical_section_end(thread);
514 // We have left the critical section, but the bucket cannot be removed
515 // while we hold the _resize_lock.
516 bucket->lock();
517 size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
518 bucket->unlock();
519 write_synchonize_on_visible_epoch(thread);
520 for (size_t node_it = 0; node_it < nd; node_it++) {
521 del_f(ndel[node_it]->value());
522 Node::destroy_node(ndel[node_it]);
523 DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
524 }
525 GlobalCounter::critical_section_begin(thread);
678
679 // We can only move one pointer at a time, otherwise a reader might be moved
680 // to the wrong chain, e.g. a reader looking for an even hash value could be
681 // moved onto the odd bucket chain.
682 write_synchonize_on_visible_epoch(thread);
683 if (delete_me != NULL) {
684 Node::destroy_node(delete_me);
685 delete_me = NULL;
686 }
687 }
688 return true;
689 }
690
691 template <typename VALUE, typename CONFIG, MEMFLAGS F>
692 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
693 internal_shrink_prolog(Thread* thread, size_t log2_size)
694 {
695 if (!try_resize_lock(thread)) {
696 return false;
697 }
698 assert(_resize_lock_owner == thread, "Re-size lock not held");
699 if (_table->_log2_size == _log2_start_size ||
700 _table->_log2_size <= log2_size) {
701 unlock_resize_lock(thread);
702 return false;
703 }
704 _new_table = new InternalTable(_table->_log2_size - 1);
705 return true;
706 }
707
708 template <typename VALUE, typename CONFIG, MEMFLAGS F>
709 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
710 internal_shrink_epilog(Thread* thread)
711 {
712 assert(_resize_lock_owner == thread, "Re-size lock not held");
713
714 InternalTable* old_table = set_table_from_new();
715 _size_limit_reached = false;
716 unlock_resize_lock(thread);
717 #ifdef ASSERT
718 for (size_t i = 0; i < old_table->_size; i++) {
719 assert(old_table->get_bucket(i)->first() == POISON_PTR,
720 "No poison found");
721 }
722 #endif
723 // ABA safe, old_table not visible to any other threads.
724 delete old_table;
725 }
726
727 template <typename VALUE, typename CONFIG, MEMFLAGS F>
728 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
729 internal_shrink_range(Thread* thread, size_t start, size_t stop)
730 {
731 // The bucket state is copied here as well.
732 // Hence all buckets in the new table will be locked.
749 b_old_even->redirect();
750 b_old_odd->redirect();
751
752 write_synchonize_on_visible_epoch(thread);
753
754 // Unlock for writes into new smaller table.
755 _new_table->get_bucket(bucket_it)->unlock();
756
757 DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
758 (Node*)POISON_PTR);)
759 DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
760 (Node*)POISON_PTR);)
761 }
762 }
763
764 template <typename VALUE, typename CONFIG, MEMFLAGS F>
765 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
766 internal_shrink(Thread* thread, size_t log2_size)
767 {
768 if (!internal_shrink_prolog(thread, log2_size)) {
769 assert(_resize_lock_owner != thread, "Re-size lock held");
770 return false;
771 }
772 assert(_resize_lock_owner == thread, "Should be locked by me");
773 internal_shrink_range(thread, 0, _new_table->_size);
774 internal_shrink_epilog(thread);
775 assert(_resize_lock_owner != thread, "Re-size lock held");
776 return true;
777 }
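// Both resize directions follow the same three-step shape: the prolog takes
// the resize lock and allocates _new_table, the range step migrates all bucket
// chains, and the epilog publishes the new table, checks that the old one is
// poisoned (in debug builds) and frees it.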
778
779 template <typename VALUE, typename CONFIG, MEMFLAGS F>
780 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
781 internal_grow_prolog(Thread* thread, size_t log2_size)
782 {
783 // We double-check _size_limit_reached/is_max_size_reached() only on the
784 // grow path, since growing implies a high load on the table
785 // while shrinking implies a low load.
786 if (is_max_size_reached()) {
787 return false;
788 }
789 if (!try_resize_lock(thread)) {
790 // Either we have an ongoing resize or an operation which doesn't want us
791 // to resize now.
792 return false;
793 }
794 if (is_max_size_reached() || _table->_log2_size >= log2_size) {
795 unlock_resize_lock(thread);
796 return false;
797 }
798
799 _new_table = new InternalTable(_table->_log2_size + 1);
800
801 if (_new_table->_log2_size == _log2_size_limit) {
802 _size_limit_reached = true;
803 }
804
805 return true;
806 }
807
808 template <typename VALUE, typename CONFIG, MEMFLAGS F>
809 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
810 internal_grow_epilog(Thread* thread)
811 {
812 assert(_resize_lock_owner == thread, "Should be locked");
813
814 InternalTable* old_table = set_table_from_new();
815 unlock_resize_lock(thread);
816 #ifdef ASSERT
817 for (size_t i = 0; i < old_table->_size; i++) {
818 assert(old_table->get_bucket(i)->first() == POISON_PTR,
819 "No poison found");
820 }
821 #endif
822 // ABA safe, old_table not visible to any other threads.
823 delete old_table;
824 }
825
826 template <typename VALUE, typename CONFIG, MEMFLAGS F>
827 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
828 internal_grow(Thread* thread, size_t log2_size)
829 {
830 if (!internal_grow_prolog(thread, log2_size)) {
831 assert(_resize_lock_owner != thread, "Re-size lock held");
832 return false;
833 }
834 assert(_resize_lock_owner == thread, "Should be locked by me");
835 internal_grow_range(thread, 0, _table->_size);
836 internal_grow_epilog(thread);
837 assert(_resize_lock_owner != thread, "Re-size lock held");
838 return true;
839 }
840
841 // Always called within critical section
842 template <typename VALUE, typename CONFIG, MEMFLAGS F>
843 template <typename LOOKUP_FUNC>
844 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
845 internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
846 {
847 bool clean = false;
848 size_t loops = 0;
849 VALUE* ret = NULL;
850
851 const Bucket* bucket = get_bucket(lookup_f.get_hash());
852 Node* node = get_node(bucket, lookup_f, &clean, &loops);
853 if (node != NULL) {
854 ret = node->value();
855 }
856 if (grow_hint != NULL) {
857 *grow_hint = loops > _grow_hint;
930 template <typename VALUE, typename CONFIG, MEMFLAGS F>
931 template <typename FUNC>
932 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
933 visit_nodes(Bucket* bucket, FUNC& visitor_f)
934 {
935 Node* current_node = bucket->first();
936 while (current_node != NULL) {
937 if (!visitor_f(current_node->value())) {
938 return false;
939 }
940 current_node = current_node->next();
941 }
942 return true;
943 }
944
945 template <typename VALUE, typename CONFIG, MEMFLAGS F>
946 template <typename FUNC>
947 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
948 do_scan_locked(Thread* thread, FUNC& scan_f)
949 {
950 assert(_resize_lock_owner == thread, "Re-size lock not held");
951 // We could hold one critical section over the entire loop, but that would
952 // block updates for a long time. Instead we choose to block resizes.
953 InternalTable* table = get_table();
954 for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
955 ScopedCS cs(thread, this);
956 if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
957 break; /* ends critical section */
958 }
959 } /* ends critical section */
960 }
961
962 template <typename VALUE, typename CONFIG, MEMFLAGS F>
963 template <typename EVALUATE_FUNC>
964 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
965 delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
966 size_t num_del, Node** ndel)
967 {
968 size_t dels = 0;
969 Node* const volatile * rem_n_prev = bucket->first_ptr();
970 Node* rem_n = bucket->first();
971 while (rem_n != NULL) {
972 if (eval_f(rem_n->value())) {
973 ndel[dels++] = rem_n;
974 bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
975 rem_n = rem_n->next();
976 if (dels == num_del) {
1067 size_t hash = CONFIG::get_hash(value, &dead_hash);
1068 if (dead_hash) {
1069 return false;
1070 }
1071 // This is an unsafe operation.
1072 InternalTable* table = get_table();
1073 Bucket* bucket = get_bucket_in(table, hash);
1074 assert(!bucket->have_redirect() && !bucket->is_locked(), "Bucket must not be redirected or locked");
1075 Node* new_node = Node::create_node(value, bucket->first());
1076 if (!bucket->cas_first(new_node, bucket->first())) {
1077 assert(false, "Uncontended cas_first should not fail");
1078 }
1079 return true;
1080 }
1081
1082 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1083 template <typename SCAN_FUNC>
1084 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1085 try_scan(Thread* thread, SCAN_FUNC& scan_f)
1086 {
1087 if (!try_resize_lock(thread)) {
1088 return false;
1089 }
1090 do_scan_locked(thread, scan_f);
1091 unlock_resize_lock(thread);
1092 return true;
1093 }
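// Example usage (illustrative only; PrintAll is a hypothetical functor taking
// a VALUE* and returning true to continue):
//
//   PrintAll printer;
//   if (!cht->try_scan(thread, printer)) {
//     // A resize or other bulk operation holds the lock; retry later or use
//     // do_scan() to block until the lock is available.
//   }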
1094
1095 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1096 template <typename SCAN_FUNC>
1097 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1098 do_scan(Thread* thread, SCAN_FUNC& scan_f)
1099 {
1100 assert(_resize_lock_owner != thread, "Re-size lock held");
1101 lock_resize_lock(thread);
1102 do_scan_locked(thread, scan_f);
1103 unlock_resize_lock(thread);
1104 assert(_resize_lock_owner != thread, "Re-size lock held");
1105 }
1106
1107 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1108 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1109 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1110 try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1111 {
1112 if (!try_resize_lock(thread)) {
1113 return false;
1114 }
1115 do_bulk_delete_locked(thread, eval_f, del_f);
1116 unlock_resize_lock(thread);
1117 assert(_resize_lock_owner != thread, "Re-size lock held");
1118 return true;
1119 }
1120
1121 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1122 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1123 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1124 bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1125 {
1126 lock_resize_lock(thread);
1127 do_bulk_delete_locked(thread, eval_f, del_f);
1128 unlock_resize_lock(thread);
1129 }
1130
1131 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1132 template <typename VALUE_SIZE_FUNC>
1133 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1134 statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1135 outputStream* st, const char* table_name)
1136 {
1137 NumberSeq summary;
1138 size_t literal_bytes = 0;
1139 if (!try_resize_lock(thread)) {
1140 st->print_cr("statistics unavailable at this moment");
1141 return;
1142 }
1143
1144 InternalTable* table = get_table();
1145 for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1146 ScopedCS cs(thread, this);
1147 size_t count = 0;
1148 Bucket* bucket = table->get_bucket(bucket_it);
1149 if (bucket->have_redirect() || bucket->is_locked()) {
1150 continue;
1151 }
1152 Node* current_node = bucket->first();
1153 while (current_node != NULL) {
1154 ++count;
1155 literal_bytes += vs_f(current_node->value());
1156 current_node = current_node->next();
1157 }
1158 summary.add((double)count);
1159 }
1160
1161 double num_buckets = summary.num();
1162 double num_entries = summary.sum();
1163
1164 size_t bucket_bytes = num_buckets * sizeof(Bucket);
1165 size_t entry_bytes = num_entries * sizeof(Node);
1166 size_t total_bytes = literal_bytes + bucket_bytes + entry_bytes;
1167
1168 size_t bucket_size = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
1171 st->print_cr("%s statistics:", table_name);
1172 st->print_cr("Number of buckets : %9" PRIuPTR " = %9" PRIuPTR
1173 " bytes, each " SIZE_FORMAT,
1174 (size_t)num_buckets, bucket_bytes, bucket_size);
1175 st->print_cr("Number of entries : %9" PRIuPTR " = %9" PRIuPTR
1176 " bytes, each " SIZE_FORMAT,
1177 (size_t)num_entries, entry_bytes, entry_size);
1178 if (literal_bytes != 0) {
1179 double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1180 st->print_cr("Number of literals : %9" PRIuPTR " = %9" PRIuPTR
1181 " bytes, avg %7.3f",
1182 (size_t)num_entries, literal_bytes, literal_avg);
1183 }
1184 st->print_cr("Total footprint : %9s = %9" PRIuPTR " bytes", ""
1185 , total_bytes);
1186 st->print_cr("Average bucket size : %9.3f", summary.avg());
1187 st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1188 st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1189 st->print_cr("Maximum bucket size : %9" PRIuPTR,
1190 (size_t)summary.maximum());
1191 unlock_resize_lock(thread);
1192 }
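// Note that buckets observed as locked or redirected are skipped above, so the
// reported numbers can slightly undercount the table contents when updates run
// concurrently with the statistics scan.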
1193
1194 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1195 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1196 try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1197 {
1198 if (!try_resize_lock(thread)) {
1199 return false;
1200 }
1201 assert(_new_table == NULL, "Must be NULL");
1202 for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1203 Bucket* bucket = _table->get_bucket(bucket_it);
1204 assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1205 while (bucket->first() != NULL) {
1206 Node* move_node = bucket->first();
1207 bool ok = bucket->cas_first(move_node->next(), move_node);
1208 assert(ok, "Uncontended cas must work");
1209 bool dead_hash = false;
1210 size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1211 if (!dead_hash) {
1212 Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
1213 assert(!bucket->have_redirect() && !bucket->is_locked(), "No redirect or lock bit should be present");
1214 move_node->set_next(insert_bucket->first());
1215 ok = insert_bucket->cas_first(move_node, insert_bucket->first());
1216 assert(ok, "Uncontended cas must work");
1217 }
1218 }
1219 }
1220 unlock_resize_lock(thread);
1221 return true;
1222 }
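// try_move_nodes_to() relinks the existing nodes into to_cht without copying
// them, so it is only safe when both tables are effectively uncontended, as
// the asserts above require.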
1223
1224 #endif // include guard