
src/hotspot/share/utilities/concurrentHashTable.inline.hpp

Old version (load_acquire/release_store via OrderAccess):




  41 
  42 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
  43 #define SPINPAUSES_PER_YIELD 8192
  44 
  45 #ifdef ASSERT
  46 #ifdef _LP64
  47 // Two low bits are not usable.
  48 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
  49 #else
  50 // Two low bits are not usable.
  51 static const void* POISON_PTR = (void*)0xffbadbac;
  52 #endif
  53 #endif
  54 
  55 // Node
  56 template <typename CONFIG, MEMFLAGS F>
  57 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  58 ConcurrentHashTable<CONFIG, F>::
  59   Node::next() const
  60 {
  61   return OrderAccess::load_acquire(&_next);
  62 }
  63 
  64 // Bucket
  65 template <typename CONFIG, MEMFLAGS F>
  66 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  67 ConcurrentHashTable<CONFIG, F>::
  68   Bucket::first_raw() const
  69 {
  70   return OrderAccess::load_acquire(&_first);
  71 }
  72 
  73 template <typename CONFIG, MEMFLAGS F>
  74 inline void ConcurrentHashTable<CONFIG, F>::
  75   Bucket::release_assign_node_ptr(
  76     typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
  77     typename ConcurrentHashTable<CONFIG, F>::Node* node) const
  78 {
  79   // Due to this assert, this method is not static.
  80   assert(is_locked(), "Must be locked.");
  81   Node** tmp = (Node**)dst;
  82   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
  83 }
  84 
  85 template <typename CONFIG, MEMFLAGS F>
  86 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  87 ConcurrentHashTable<CONFIG, F>::
  88   Bucket::first() const
  89 {
  90   // We strip the state bits before returning the ptr.
  91   return clear_state(OrderAccess::load_acquire(&_first));
  92 }
  93 
  94 template <typename CONFIG, MEMFLAGS F>
  95 inline bool ConcurrentHashTable<CONFIG, F>::
  96   Bucket::have_redirect() const
  97 {
  98   return is_state(first_raw(), STATE_REDIRECT_BIT);
  99 }
 100 
 101 template <typename CONFIG, MEMFLAGS F>
 102 inline bool ConcurrentHashTable<CONFIG, F>::
 103   Bucket::is_locked() const
 104 {
 105   return is_state(first_raw(), STATE_LOCK_BIT);
 106 }
 107 
 108 template <typename CONFIG, MEMFLAGS F>
 109 inline void ConcurrentHashTable<CONFIG, F>::
 110   Bucket::lock()
 111 {


 156   Bucket::trylock()
 157 {
 158   if (is_locked()) {
 159     return false;
 160   }
 161   // We expect a clean first pointer (no state bits set).
 162   Node* tmp = first();
 163   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
 164     return true;
 165   }
 166   return false;
 167 }
 168 
 169 template <typename CONFIG, MEMFLAGS F>
 170 inline void ConcurrentHashTable<CONFIG, F>::
 171   Bucket::unlock()
 172 {
 173   assert(is_locked(), "Must be locked.");
 174   assert(!have_redirect(),
 175          "Unlocking a bucket after it has reached terminal state.");
 176   OrderAccess::release_store(&_first, clear_state(first()));
 177 }
 178 
 179 template <typename CONFIG, MEMFLAGS F>
 180 inline void ConcurrentHashTable<CONFIG, F>::
 181   Bucket::redirect()
 182 {
 183   assert(is_locked(), "Must be locked.");
 184   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 185 }
 186 
 187 // InternalTable
 188 template <typename CONFIG, MEMFLAGS F>
 189 inline ConcurrentHashTable<CONFIG, F>::
 190   InternalTable::InternalTable(size_t log2_size)
 191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
 192       _hash_mask(~(~((size_t)0) << _log2_size))
 193 {
 194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
 195          "Bad size");
 196   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
 197   // Use placement new for each element instead of new[] which could use more
 198   // memory than allocated.
 199   for (size_t i = 0; i < _size; ++i) {
 200     new (_buckets + i) Bucket();
 201   }
 202 }
 203 
 204 template <typename CONFIG, MEMFLAGS F>
 205 inline ConcurrentHashTable<CONFIG, F>::
 206   InternalTable::~InternalTable()
 207 {
 208   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 209 }
 210 
 211 // ScopedCS
 212 template <typename CONFIG, MEMFLAGS F>
 213 inline ConcurrentHashTable<CONFIG, F>::
 214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
 215     : _thread(thread),
 216       _cht(cht),
 217       _cs_context(GlobalCounter::critical_section_begin(_thread))
 218 {
 219   // This version is published now.
 220   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
 221     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
 222   }
 223 }
 224 
 225 template <typename CONFIG, MEMFLAGS F>
 226 inline ConcurrentHashTable<CONFIG, F>::
 227   ScopedCS::~ScopedCS()
 228 {
 229   GlobalCounter::critical_section_end(_thread, _cs_context);
 230 }
 231 
 232 template <typename CONFIG, MEMFLAGS F>
 233 template <typename LOOKUP_FUNC>
 234 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
 235   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 236 {
 237   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 238 }
 239 
 240 // HaveDeletables
 241 template <typename CONFIG, MEMFLAGS F>


 272   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 273                                                    EVALUATE_FUNC& eval_f,
 274                                                    Bucket* preb)
 275 {
 276   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 277     if (eval_f(next->value())) {
 278       return true;
 279     }
 280   }
 281   return false;
 282 }
 283 
 284 // ConcurrentHashTable
 285 template <typename CONFIG, MEMFLAGS F>
 286 inline void ConcurrentHashTable<CONFIG, F>::
 287   write_synchonize_on_visible_epoch(Thread* thread)
 288 {
 289   assert(_resize_lock_owner == thread, "Re-size lock not held");
 290   OrderAccess::fence(); // Prevent the load below from floating up.
 291   // If no reader saw this version we can skip write_synchronize.
 292   if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
 293     return;
 294   }
 295   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
 296   // We mark this/next version that we are synchronizing on as not published.
 297   // A reader will zero this flag if it reads this/next version.
 298   OrderAccess::release_store(&_invisible_epoch, thread);
 299   GlobalCounter::write_synchronize();
 300 }
 301 
 302 template <typename CONFIG, MEMFLAGS F>
 303 inline bool ConcurrentHashTable<CONFIG, F>::
 304   try_resize_lock(Thread* locker)
 305 {
 306   if (_resize_lock->try_lock()) {
 307     if (_resize_lock_owner != NULL) {
 308       assert(locker != _resize_lock_owner, "Already own lock");
 309   // We got the mutex but the internal state is locked.
 310       _resize_lock->unlock();
 311       return false;
 312     }
 313   } else {
 314     return false;
 315   }
 316   _invisible_epoch = 0;
 317   _resize_lock_owner = locker;
 318   return true;


 357 inline void ConcurrentHashTable<CONFIG, F>::
 358   free_nodes()
 359 {
 360   // We assume there is no concurrent access during freeing.
 361   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
 362     Bucket* bucket = _table->get_buckets() + node_it;
 363     Node* node = bucket->first();
 364     while (node != NULL) {
 365       Node* free_node = node;
 366       node = node->next();
 367       Node::destroy_node(free_node);
 368     }
 369   }
 370 }
 371 
 372 template <typename CONFIG, MEMFLAGS F>
 373 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 374 ConcurrentHashTable<CONFIG, F>::
 375   get_table() const
 376 {
 377   return OrderAccess::load_acquire(&_table);
 378 }
 379 
 380 template <typename CONFIG, MEMFLAGS F>
 381 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 382 ConcurrentHashTable<CONFIG, F>::
 383   get_new_table() const
 384 {
 385   return OrderAccess::load_acquire(&_new_table);
 386 }
 387 
 388 template <typename CONFIG, MEMFLAGS F>
 389 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 390 ConcurrentHashTable<CONFIG, F>::
 391   set_table_from_new()
 392 {
 393   InternalTable* old_table = _table;
 394   // Publish the new table.
 395   OrderAccess::release_store(&_table, _new_table);
 396   // All must see this.
 397   GlobalCounter::write_synchronize();
 398   // _new_table is not read any more.
 399   _new_table = NULL;
 400   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
 401   return old_table;
 402 }
 403 
 404 template <typename CONFIG, MEMFLAGS F>
 405 inline void ConcurrentHashTable<CONFIG, F>::
 406   internal_grow_range(Thread* thread, size_t start, size_t stop)
 407 {
 408   assert(stop <= _table->_size, "Outside backing array");
 409   assert(_new_table != NULL, "Grow not properly set up before start");
 410   // The state is also copied here, hence all buckets in the new table will be
 411   // locked. We call the siblings odd/even, where the even sibling has high bit 0
 412   // and the odd sibling has high bit 1.
 413   for (size_t even_index = start; even_index < stop; even_index++) {
 414     Bucket* bucket = _table->get_bucket(even_index);
 415 

New version (load_acquire/release_store via Atomic):

  41 
  42 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
  43 #define SPINPAUSES_PER_YIELD 8192
  44 
  45 #ifdef ASSERT
  46 #ifdef _LP64
  47 // Two low bits are not usable.
  48 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
  49 #else
  50 // Two low bits are not usable.
  51 static const void* POISON_PTR = (void*)0xffbadbac;
  52 #endif
  53 #endif
  54 
  55 // Node
  56 template <typename CONFIG, MEMFLAGS F>
  57 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  58 ConcurrentHashTable<CONFIG, F>::
  59   Node::next() const
  60 {
  61   return Atomic::load_acquire(&_next);
  62 }
  63 
  64 // Bucket
  65 template <typename CONFIG, MEMFLAGS F>
  66 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  67 ConcurrentHashTable<CONFIG, F>::
  68   Bucket::first_raw() const
  69 {
  70   return Atomic::load_acquire(&_first);
  71 }
  72 
  73 template <typename CONFIG, MEMFLAGS F>
  74 inline void ConcurrentHashTable<CONFIG, F>::
  75   Bucket::release_assign_node_ptr(
  76     typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
  77     typename ConcurrentHashTable<CONFIG, F>::Node* node) const
  78 {
  79   // Due to this assert, this method is not static.
  80   assert(is_locked(), "Must be locked.");
  81   Node** tmp = (Node**)dst;
  82   Atomic::release_store(tmp, clear_set_state(node, *dst));
  83 }
  84 
  85 template <typename CONFIG, MEMFLAGS F>
  86 inline typename ConcurrentHashTable<CONFIG, F>::Node*
  87 ConcurrentHashTable<CONFIG, F>::
  88   Bucket::first() const
  89 {
  90   // We strip the state bits before returning the ptr.
  91   return clear_state(Atomic::load_acquire(&_first));
  92 }
  93 
  94 template <typename CONFIG, MEMFLAGS F>
  95 inline bool ConcurrentHashTable<CONFIG, F>::
  96   Bucket::have_redirect() const
  97 {
  98   return is_state(first_raw(), STATE_REDIRECT_BIT);
  99 }
 100 
 101 template <typename CONFIG, MEMFLAGS F>
 102 inline bool ConcurrentHashTable<CONFIG, F>::
 103   Bucket::is_locked() const
 104 {
 105   return is_state(first_raw(), STATE_LOCK_BIT);
 106 }
 107 
 108 template <typename CONFIG, MEMFLAGS F>
 109 inline void ConcurrentHashTable<CONFIG, F>::
 110   Bucket::lock()
 111 {


 156   Bucket::trylock()
 157 {
 158   if (is_locked()) {
 159     return false;
 160   }
 161   // We expect a clean first pointer (no state bits set).
 162   Node* tmp = first();
 163   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
 164     return true;
 165   }
 166   return false;
 167 }
 168 
 169 template <typename CONFIG, MEMFLAGS F>
 170 inline void ConcurrentHashTable<CONFIG, F>::
 171   Bucket::unlock()
 172 {
 173   assert(is_locked(), "Must be locked.");
 174   assert(!have_redirect(),
 175          "Unlocking a bucket after it has reached terminal state.");
 176   Atomic::release_store(&_first, clear_state(first()));
 177 }
 178 
 179 template <typename CONFIG, MEMFLAGS F>
 180 inline void ConcurrentHashTable<CONFIG, F>::
 181   Bucket::redirect()
 182 {
 183   assert(is_locked(), "Must be locked.");
 184   Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 185 }
 186 
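The Bucket operations above tag the two low pointer bits of _first with lock/redirect state (the bits the POISON_PTR comments call "not usable"). The helpers set_state, clear_state, is_state and clear_set_state are defined elsewhere and not shown in this hunk; the following standalone sketch (illustration only, using std::atomic instead of the HotSpot Atomic class, with made-up helper bodies) shows the pointer-tagging idea and the cmpxchg-style trylock:

#include <atomic>
#include <cstdint>

struct Node;                                         // payload layout does not matter here

static const uintptr_t STATE_LOCK_BIT     = 0x1;     // "two low bits are not usable"
static const uintptr_t STATE_REDIRECT_BIT = 0x2;
static const uintptr_t STATE_MASK         = 0x3;

static Node* set_state(Node* n, uintptr_t bit) { return (Node*)((uintptr_t)n | bit); }
static Node* clear_state(Node* n)              { return (Node*)((uintptr_t)n & ~STATE_MASK); }
static bool  is_state(Node* n, uintptr_t bit)  { return ((uintptr_t)n & bit) != 0; }
// Pointer bits taken from 'node', state bits kept from 'cur' (cf. release_assign_node_ptr above).
static Node* clear_set_state(Node* node, Node* cur) {
  return (Node*)(((uintptr_t)node & ~STATE_MASK) | ((uintptr_t)cur & STATE_MASK));
}

struct ToyBucket {
  std::atomic<Node*> _first{nullptr};
  bool trylock() {
    Node* clean = clear_state(_first.load(std::memory_order_acquire));
    // The exchange only succeeds if _first carried no state bits, as in Bucket::trylock().
    return _first.compare_exchange_strong(clean, set_state(clean, STATE_LOCK_BIT));
  }
  bool have_redirect() {
    return is_state(_first.load(std::memory_order_acquire), STATE_REDIRECT_BIT);
  }
  void release_assign(Node* node) {
    // Swing the pointer while keeping the current state bits.
    _first.store(clear_set_state(node, _first.load(std::memory_order_relaxed)),
                 std::memory_order_release);
  }
  void unlock() {
    _first.store(clear_state(_first.load(std::memory_order_relaxed)),
                 std::memory_order_release);
  }
};
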
 187 // InternalTable
 188 template <typename CONFIG, MEMFLAGS F>
 189 inline ConcurrentHashTable<CONFIG, F>::
 190   InternalTable::InternalTable(size_t log2_size)
 191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
 192       _hash_mask(~(~((size_t)0) << _log2_size))
 193 {
 194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
 195          "Bad size");
 196   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
 197   // Use placement new for each element instead of new[], which could use more
 198   // memory than allocated.
 199   for (size_t i = 0; i < _size; ++i) {
 200     new (_buckets + i) Bucket();
 201   }
 202 }
 203 
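As a side note on the constructor above: _hash_mask is the all-ones mask of the low _log2_size bits, so a bucket index is simply hash & _hash_mask. A tiny self-contained check of that arithmetic (example values, not from the source):

#include <cassert>
#include <cstddef>

int main() {
  size_t log2_size = 4;                               // example: 16 buckets
  size_t size      = ((size_t)1) << log2_size;        // 16
  size_t hash_mask = ~(~((size_t)0) << log2_size);    // low 4 bits set: 0xf
  size_t hash      = 0x12345678;
  size_t index     = hash & hash_mask;                // 0x8
  assert(index < size);
  return 0;
}
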
 204 template <typename CONFIG, MEMFLAGS F>
 205 inline ConcurrentHashTable<CONFIG, F>::
 206   InternalTable::~InternalTable()
 207 {
 208   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 209 }
 210 
 211 // ScopedCS
 212 template <typename CONFIG, MEMFLAGS F>
 213 inline ConcurrentHashTable<CONFIG, F>::
 214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
 215     : _thread(thread),
 216       _cht(cht),
 217       _cs_context(GlobalCounter::critical_section_begin(_thread))
 218 {
 219   // This version is published now.
 220   if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
 221     Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
 222   }
 223 }
 224 
 225 template <typename CONFIG, MEMFLAGS F>
 226 inline ConcurrentHashTable<CONFIG, F>::
 227   ScopedCS::~ScopedCS()
 228 {
 229   GlobalCounter::critical_section_end(_thread, _cs_context);
 230 }
 231 
 232 template <typename CONFIG, MEMFLAGS F>
 233 template <typename LOOKUP_FUNC>
 234 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
 235   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 236 {
 237   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 238 }
 239 
 240 // HaveDeletables
 241 template <typename CONFIG, MEMFLAGS F>


 272   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
 273                                                    EVALUATE_FUNC& eval_f,
 274                                                    Bucket* preb)
 275 {
 276   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
 277     if (eval_f(next->value())) {
 278       return true;
 279     }
 280   }
 281   return false;
 282 }
 283 
 284 // ConcurrentHashTable
 285 template <typename CONFIG, MEMFLAGS F>
 286 inline void ConcurrentHashTable<CONFIG, F>::
 287   write_synchonize_on_visible_epoch(Thread* thread)
 288 {
 289   assert(_resize_lock_owner == thread, "Re-size lock not held");
 290   OrderAccess::fence(); // Prevent the load below from floating up.
 291   // If no reader saw this version we can skip write_synchronize.
 292   if (Atomic::load_acquire(&_invisible_epoch) == thread) {
 293     return;
 294   }
 295   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
 296   // We mark this/next version that we are synchronizing on as not published.
 297   // A reader will zero this flag if it reads this/next version.
 298   Atomic::release_store(&_invisible_epoch, thread);
 299   GlobalCounter::write_synchronize();
 300 }
 301 
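ScopedCS and write_synchonize_on_visible_epoch() cooperate through _invisible_epoch: the bulk operation stores its own Thread* there, and any reader that enters a critical section afterwards zeroes it again; if the stored value is still intact on the next call, no reader saw that epoch and the expensive GlobalCounter::write_synchronize() can be skipped. A simplified standalone sketch of that handshake (std::atomic and a function-pointer placeholder instead of the HotSpot primitives, omitting the fence and assert shown above):

#include <atomic>

static std::atomic<void*> invisible_epoch{nullptr};

// Reader side, analogous to the ScopedCS constructor.
void reader_enter_critical_section() {
  if (invisible_epoch.load(std::memory_order_acquire) != nullptr) {
    invisible_epoch.store(nullptr, std::memory_order_release);  // "a reader saw this epoch"
  }
}

// Writer side, analogous to write_synchonize_on_visible_epoch().
void writer_synchronize(void* self, void (*write_synchronize)()) {
  if (invisible_epoch.load(std::memory_order_acquire) == self) {
    return;                              // still unseen: skip the expensive synchronize
  }
  invisible_epoch.store(self, std::memory_order_release);
  write_synchronize();                   // wait for all readers in earlier epochs
}
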
 302 template <typename CONFIG, MEMFLAGS F>
 303 inline bool ConcurrentHashTable<CONFIG, F>::
 304   try_resize_lock(Thread* locker)
 305 {
 306   if (_resize_lock->try_lock()) {
 307     if (_resize_lock_owner != NULL) {
 308       assert(locker != _resize_lock_owner, "Already own lock");
 309   // We got the mutex but the internal state is locked.
 310       _resize_lock->unlock();
 311       return false;
 312     }
 313   } else {
 314     return false;
 315   }
 316   _invisible_epoch = 0;
 317   _resize_lock_owner = locker;
 318   return true;

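As the comment in try_resize_lock() above indicates, the mutex can be acquirable while the resize lock is still logically owned, so taking the mutex alone is not enough; _resize_lock_owner is checked as well. A simplified standalone sketch of that two-level check (std::mutex and placeholder types, not the HotSpot Mutex API):

#include <mutex>

struct ToyResizeLock {
  std::mutex _mutex;
  void*      _owner = nullptr;   // logical owner, may be set while the mutex is free

  bool try_resize_lock(void* locker) {
    if (!_mutex.try_lock()) {
      return false;              // mutex currently contended
    }
    if (_owner != nullptr) {
      _mutex.unlock();           // "got the mutex but the internal state is locked"
      return false;
    }
    _owner = locker;             // success: keep the mutex and record ownership
    return true;
  }
};
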

 357 inline void ConcurrentHashTable<CONFIG, F>::
 358   free_nodes()
 359 {
 360   // We assume there is no concurrent access during freeing.
 361   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
 362     Bucket* bucket = _table->get_buckets() + node_it;
 363     Node* node = bucket->first();
 364     while (node != NULL) {
 365       Node* free_node = node;
 366       node = node->next();
 367       Node::destroy_node(free_node);
 368     }
 369   }
 370 }
 371 
 372 template <typename CONFIG, MEMFLAGS F>
 373 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 374 ConcurrentHashTable<CONFIG, F>::
 375   get_table() const
 376 {
 377   return Atomic::load_acquire(&_table);
 378 }
 379 
 380 template <typename CONFIG, MEMFLAGS F>
 381 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 382 ConcurrentHashTable<CONFIG, F>::
 383   get_new_table() const
 384 {
 385   return Atomic::load_acquire(&_new_table);
 386 }
 387 
 388 template <typename CONFIG, MEMFLAGS F>
 389 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 390 ConcurrentHashTable<CONFIG, F>::
 391   set_table_from_new()
 392 {
 393   InternalTable* old_table = _table;
 394   // Publish the new table.
 395   Atomic::release_store(&_table, _new_table);
 396   // All must see this.
 397   GlobalCounter::write_synchronize();
 398   // _new_table is not read any more.
 399   _new_table = NULL;
 400   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
 401   return old_table;
 402 }
 403 
 404 template <typename CONFIG, MEMFLAGS F>
 405 inline void ConcurrentHashTable<CONFIG, F>::
 406   internal_grow_range(Thread* thread, size_t start, size_t stop)
 407 {
 408   assert(stop <= _table->_size, "Outside backing array");
 409   assert(_new_table != NULL, "Grow not properly set up before start");
 410   // The state is also copied here, hence all buckets in the new table will be
 411   // locked. We call the siblings odd/even, where the even sibling has high bit 0
 412   // and the odd sibling has high bit 1.
 413   for (size_t even_index = start; even_index < stop; even_index++) {
 414     Bucket* bucket = _table->get_bucket(even_index);
 415 
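To make the odd/even sibling naming in the comment above concrete: after the table doubles, keys that shared a bucket split on the next hash bit, the "even" sibling keeping the old index and the "odd" sibling landing old_size higher. A small worked example (illustration only, not part of the source):

#include <cstdio>
#include <cstddef>

int main() {
  size_t old_log2  = 4;
  size_t old_size  = (size_t)1 << old_log2;        // 16 buckets before the grow
  size_t old_mask  = old_size - 1;                  // 0x0f
  size_t new_mask  = (old_size << 1) - 1;           // 0x1f after doubling
  size_t hash_even = 0x23, hash_odd = 0x33;         // both hash to bucket 3 in the old table
  printf("old indices: %zu %zu\n", hash_even & old_mask, hash_odd & old_mask);  // 3 3
  printf("new indices: %zu %zu\n", hash_even & new_mask, hash_odd & new_mask);  // 3 19 (3 + 16)
  return 0;
}
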

