/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP

#include "memory/allocation.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/statistics.hpp"

// A mostly concurrent hash-table where the read-side is wait-free, inserts are
// CAS and deletes mutually exclude each other on a per-bucket basis. VALUE is
// the type kept inside each Node and CONFIG contains hash and allocation
// methods. A CALLBACK_FUNC and LOOKUP_FUNC need to be provided for get and
// insert.
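//
// A minimal usage sketch with hypothetical names (MyValue, MyConfig and
// MyTable are illustrations, not part of this header). CONFIG must provide
// the static methods this header calls on it: get_hash(), allocate_node()
// and free_node(); see BaseConfig below for the defaults.
//
//   class MyValue;   // The VALUE type kept in each Node.
//   class MyConfig;  // Supplies get_hash/allocate_node/free_node.
//
//   typedef ConcurrentHashTable<MyValue*, MyConfig, mtInternal> MyTable;
//   MyTable* table = new MyTable();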

class Thread;
class Mutex;

template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable : public CHeapObj<F> {
 private:
  // This is the internal node structure.
  // Only constructed with placement new from memory allocated with MEMFLAGS of
  // the InternalTable or user-defined memory.
  class Node {
   private:
    Node * volatile _next;
    VALUE _value;
   public:
    Node(const VALUE& value, Node* next = NULL)
      : _next(next), _value(value) {
      assert((((uintptr_t)this) & ((uintptr_t)0x3)) == 0,
             "Must be 4 byte aligned.");
    }

    Node* next() const;
    void set_next(Node* node)         { _next = node; }
    Node* const volatile * next_ptr() { return &_next; }

    VALUE* value()                    { return &_value; }

    // Creates a node.
    static Node* create_node(const VALUE& value, Node* next = NULL) {
      return new (CONFIG::allocate_node(sizeof(Node), value)) Node(value, next);
    }
    // Destroys a node.
    static void destroy_node(Node* node) {
      CONFIG::free_node((void*)node, node->_value);
    }

    void print_on(outputStream* st) const {};
    void print_value_on(outputStream* st) const {};
  };

  // Only constructed with placement new from an array allocated with MEMFLAGS
  // of InternalTable.
  class Bucket {
   private:

    // The embedded state in the two low bits of the first pointer is a
    // spinlock with 3 states: unlocked, locked and redirect. You must never
    // busy-spin on trylock() or call lock() without holding _resize_lock;
    // that would deadlock. Redirect can only be installed by the owner and is
    // the final state of a bucket.
    // The only two valid flows are:
    // unlocked -> locked -> unlocked
    // unlocked -> locked -> redirect
    // The locked state only applies to an updater.
    // Readers only check for redirect.
    Node * volatile _first;

    static const uintptr_t STATE_LOCK_BIT     = 0x1;
    static const uintptr_t STATE_REDIRECT_BIT = 0x2;
    static const uintptr_t STATE_MASK         = 0x3;

    // Get the first pointer unmasked.
    Node* first_raw() const;

    // Methods to manipulate the embedded state.
    static bool is_state(Node* node, uintptr_t bits) {
      return (bits & (uintptr_t)node) == bits;
    }

    static Node* set_state(Node* n, uintptr_t bits) {
      return (Node*)(bits | (uintptr_t)n);
    }

    static uintptr_t get_state(Node* node) {
      return (((uintptr_t)node) & STATE_MASK);
    }

    static Node* clear_state(Node* node) {
      return (Node*)(((uintptr_t)node) & (~(STATE_MASK)));
    }

    static Node* clear_set_state(Node* node, Node* state) {
      return (Node*)(((uintptr_t)clear_state(node)) ^ get_state(state));
    }
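
    // Worked example of the encoding above (addresses are illustrative only):
    // with a node at address 0x1000, a locked bucket stores
    // 0x1000 | STATE_LOCK_BIT == 0x1001 in _first. Then
    //   get_state((Node*)0x1001)   == 0x1 (locked),
    //   clear_state((Node*)0x1001) == (Node*)0x1000, and
    //   is_state((Node*)0x1001, STATE_REDIRECT_BIT) == false.
    // This only works because nodes are at least 4 byte aligned, which leaves
    // the two low bits free for the state.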

   public:
    // A bucket is only one pointer with the embedded state.
    Bucket() : _first(NULL) {};

    // Get the first pointer unmasked.
    Node* first() const;

    // Get a pointer to the const first pointer. Do not dereference this
    // pointer; the pointer pointed to _may_ contain an embedded state. Such a
    // pointer should only be used as input to release_assign_node_ptr.
    Node* const volatile * first_ptr() { return &_first; }

    // This is the only place where a pointer to a Node pointer that
    // potentially is _first should be changed. Otherwise we destroy the
    // embedded state. We only give out a pointer to a const Node pointer to
    // avoid accidental assignment, thus here we must cast the const part
    // away. The method is not static due to an assert.
    void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;

    // This method assigns this bucket's last Node's next pointer to the input
    // Node.
    void release_assign_last_node_next(Node* node);

    // Setting the first pointer must be done with CAS.
    bool cas_first(Node *node, Node* expect);

    // Returns true if this bucket is redirecting to a new table.
    // Redirect is a terminal state and will never change.
    bool have_redirect() const;

    // Return true if this bucket is locked for updates.
    bool is_locked() const;

    // Returns true if this bucket was successfully locked.
    bool trylock();

    // The bucket might be invalid due to a concurrent resize. The lock()
    // method does not respect that and can deadlock if the caller does not
    // hold _resize_lock.
    void lock();

    // Unlocks this bucket.
    void unlock();

    // Installs redirect in this bucket.
    // Prior to doing so you must have successfully locked this bucket.
    void redirect();
  };

  // The backing storage table holding the buckets, its size and mask-bits.
  // The table size is always a power of two for two reasons:
  // - Resizing can only halve or double the size
  //   (any power of two would also be possible).
  // - The bucket index is computed by masking the hash.
  class InternalTable : public CHeapObj<F> {
   private:
    Bucket* _buckets;        // Bucket array.
   public:
    const size_t _log2_size; // Size in log2.
    const size_t _size;      // Size, 2^_log2_size.

    // The mask used on the hash for selecting the bucket.
    // The masked value is guaranteed to be inside the buckets array.
    const size_t _hash_mask;
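
    // Worked example of the masking (sizes are illustrative only): with
    // _log2_size == 3 we get _size == 8 and _hash_mask == 0x7, so a hash of
    // 0x2A selects bucket 0x2A & 0x7 == 2, which is always a valid index into
    // the 8-entry bucket array.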

    // Create a backing table
    InternalTable(size_t log2_size);
    ~InternalTable();

    Bucket* get_buckets() { return _buckets; }
    Bucket* get_bucket(size_t idx) { return &_buckets[idx]; }
  };

  // Used as a default functor when no functor is supplied for some methods.
  struct NoOp {
    void operator()(VALUE*) {}
    const VALUE& operator()() {}
    void operator()(bool, VALUE*) {}
  } noOp;

  // For materializing a supplied value.
  class LazyValueRetrieve {
   private:
    const VALUE& _val;
   public:
    LazyValueRetrieve(const VALUE& val) : _val(val) {}
    const VALUE& operator()() { return _val; }
  };

  InternalTable* _table;      // Active table.
  InternalTable* _new_table;  // Table we are resizing to.

  // Default sizes
  static const size_t DEFAULT_MAX_SIZE_LOG2 = 21;
  static const size_t DEFAULT_START_SIZE_LOG2 = 13;
  static const size_t DEFAULT_GROW_HINT = 4; // Chain length

  const size_t _log2_size_limit;  // The biggest size.
  const size_t _log2_start_size;  // Start size.
  const size_t _grow_hint;        // Number of linked items

  volatile bool _size_limit_reached;

  // This lock serializes resizers and other bulk operations that do not
  // support a concurrent resize.
  Mutex* _resize_lock;
  // Since we need to drop the mutex for safepoints, but must stop other
  // threads from taking the mutex after a safepoint, this field holds the
  // actual lock state. After acquiring the mutex you must check if it is
  // already locked. If so you must drop the mutex until the real lock holder
  // grabs the mutex.
  volatile Thread* _resize_lock_owner;
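
  // Conceptual sketch of the protocol described above (not the actual
  // implementation, which lives in the .inline.hpp file): a thread calling
  // try_resize_lock() roughly does
  //
  //   if (!_resize_lock->try_lock()) return false; // mutex busy
  //   if (_resize_lock_owner != NULL) {            // state already locked
  //     _resize_lock->unlock();                    // drop mutex, back off
  //     return false;
  //   }
  //   _resize_lock_owner = locker;                 // mutex and state held
  //   return true;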

  // Returns true if locking both the mutex and the state succeeded.
  bool try_resize_lock(Thread* locker);
  // Returns when both the mutex and the state are properly locked.
  void lock_resize_lock(Thread* locker);
  // Unlocks the mutex and the state.
  void unlock_resize_lock(Thread* locker);

  // This method sets the _invisible_epoch and does a write_synchronize.
  // Subsequent calls check the state of _invisible_epoch and determine if the
  // write_synchronize can be avoided. If not, it sets the _invisible_epoch
  // again and does a write_synchronize.
  void write_synchonize_on_visible_epoch(Thread* thread);
  // To be able to avoid write_synchronize in resize and other bulk
  // operations, this field keeps track of whether a version of the hash-table
  // was ever seen. We use the working thread pointer as a tag for debugging.
  // The _invisible_epoch can only be used by the owner of _resize_lock.
  volatile Thread* _invisible_epoch;

  // Scoped critical section, which also handles the invisible epochs.
  // An invisible epoch/version does not need a write_synchronize().
  class ScopedCS: public StackObj {
   protected:
    Thread* _thread;
    ConcurrentHashTable<VALUE, CONFIG, F>* _cht;
    GlobalCounter::CSContext _cs_context;
   public:
    ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht);
    ~ScopedCS();
  };


  // Max number of deletes in one bucket chain during bulk delete.
  static const size_t BULK_DELETE_LIMIT = 256;

  // Simple getters and setters for the internal table.
  InternalTable* get_table() const;
  InternalTable* get_new_table() const;
  InternalTable* set_table_from_new();

  // Destroys all nodes.
  void free_nodes();

  // Mask away high bits of hash.
  static size_t bucket_idx_hash(InternalTable* table, const uintx hash) {
    return ((size_t)hash) & table->_hash_mask;
  }

  // Returns the bucket for the hash in the given internal table.
  Bucket* get_bucket_in(InternalTable* table, const uintx hash) const {
    size_t bucket_index = bucket_idx_hash(table, hash);
    return table->get_bucket(bucket_index);
  }

  // Returns the correct bucket for reading and handles resizing.
  Bucket* get_bucket(const uintx hash) const;

  // Returns the correct bucket for updates and handles resizing.
  Bucket* get_bucket_locked(Thread* thread, const uintx hash);

  // Finds a node.
  template <typename LOOKUP_FUNC>
  Node* get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
                 bool* have_dead, size_t* loops = NULL) const;

  // Methods for shrinking.
  bool internal_shrink_prolog(Thread* thread, size_t log2_size);
  void internal_shrink_epilog(Thread* thread);
  void internal_shrink_range(Thread* thread, size_t start, size_t stop);
  bool internal_shrink(Thread* thread, size_t size_limit_log2);

  // Methods for growing.
  bool unzip_bucket(Thread* thread, InternalTable* old_table,
                    InternalTable* new_table, size_t even_index,
                    size_t odd_index);
  bool internal_grow_prolog(Thread* thread, size_t log2_size);
  void internal_grow_epilog(Thread* thread);
  void internal_grow_range(Thread* thread, size_t start, size_t stop);
  bool internal_grow(Thread* thread, size_t log2_size);

  // Get a value.
  template <typename LOOKUP_FUNC>
  VALUE* internal_get(Thread* thread, LOOKUP_FUNC& lookup_f,
                      bool* grow_hint = NULL);

  // Plain insert.
  template <typename LOOKUP_FUNC>
  bool internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                       bool* grow_hint, bool* clean_hint);

  // Returns true if an item matching LOOKUP_FUNC is removed.
  // Calls DELETE_FUNC before destroying the node.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f,
                       DELETE_FUNC& delete_f);

  // Visits nodes with FUNC.
  template <typename FUNC>
  static bool visit_nodes(Bucket* bucket, FUNC& visitor_f);

  // During shrink/grow we cannot guarantee that we only visit nodes once with
  // the current algorithm. To keep it simple, the caller is required to hold
  // _resize_lock.
  template <typename FUNC>
  void do_scan_locked(Thread* thread, FUNC& scan_f);

  // Check for dead items in a bucket.
  template <typename EVALUATE_FUNC>
  size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                            size_t num_del, Node** ndel);

  // Check for dead items in this table. During shrink/grow we cannot
  // guarantee that we only visit nodes once. To keep it simple, the caller is
  // required to hold _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked(Thread* thread, EVALUATE_FUNC& eval_f
                             , DELETE_FUNC& del_f) {
    do_bulk_delete_locked_for(thread, 0, _table->_size, eval_f, del_f);
  }

  // To have prefetching for a VALUE that is a pointer during
  // do_bulk_delete_locked, we have these helper classes: one for the
  // non-pointer case without prefetch and one for the pointer case with
  // prefetch.
  template <bool b, typename EVALUATE_FUNC>
  struct HaveDeletables {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };
  template<typename EVALUATE_FUNC>
  struct HaveDeletables<true, EVALUATE_FUNC> {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };

  // Check for dead items in this table within a range. During shrink/grow we
  // cannot guarantee that we only visit nodes once. To keep it simple, the
  // caller is required to hold _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked_for(Thread* thread, size_t start_idx,
                                 size_t stop_idx, EVALUATE_FUNC& eval_f,
                                 DELETE_FUNC& del_f, bool is_mt = false);

  // Method to delete one item.
  template <typename LOOKUP_FUNC>
  void delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f);

 public:
  ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2,
                      size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
                      size_t grow_hint = DEFAULT_GROW_HINT);

  ~ConcurrentHashTable();

  TableRateStatistics _stats_rate;

  size_t get_size_log2(Thread* thread);
  size_t get_node_size() const { return sizeof(Node); }
  bool is_max_size_reached() { return _size_limit_reached; }

  // This means no paused bucket resize operation is going to resume
  // on this table.
  bool is_safepoint_safe() { return _resize_lock_owner == NULL; }

  // Re-size operations.
  bool shrink(Thread* thread, size_t size_limit_log2 = 0);
  bool grow(Thread* thread, size_t size_limit_log2 = 0);

  // All callbacks for get are under critical sections. Other callbacks may be
  // under a critical section or may have locked parts of the table. Calling
  // any methods on the table during a callback is not supported. Only
  // MultiGetHandle supports multiple gets.

  // Get methods return true if an item matching LOOKUP_FUNC is found, in
  // which case FOUND_FUNC is called on it.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& foundf,
           bool* grow_hint = NULL);

  // Returns true if the item was inserted; duplicates are found with
  // LOOKUP_FUNC.
  template <typename LOOKUP_FUNC>
  bool insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
              bool* grow_hint = NULL, bool* clean_hint = NULL) {
    return internal_insert(thread, lookup_f, value, grow_hint, clean_hint);
  }
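
  // Hypothetical usage sketch for get() and insert(); MyLookup, MyFound and
  // the table/thread variables are illustrations only. The exact members a
  // LOOKUP_FUNC must provide (a hash and an equality check against a stored
  // VALUE) are dictated by the implementation in the .inline.hpp file, not by
  // this header.
  //
  //   MyLookup lookup(key);   // Hash/equality functor for the wanted key.
  //   MyFound  found;         // Called with a VALUE* if the item is found.
  //   if (!table->get(thread, lookup, found)) {
  //     bool grow_hint = false;
  //     table->insert(thread, lookup, value, &grow_hint);
  //     // A set grow_hint suggests the chain got long; the caller may grow().
  //   }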

  // This does a fast unsafe insert and can thus only be used when there is no
  // risk of duplicates and no other thread uses this table.
  bool unsafe_insert(const VALUE& value);

  // Returns true if an item matching LOOKUP_FUNC was deleted;
  // prior to destruction DELETE_FUNC is called.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& del_f) {
    return internal_remove(thread, lookup_f, del_f);
  }

  // Same without DELETE_FUNC.
  template <typename LOOKUP_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f) {
    return internal_remove(thread, lookup_f, noOp);
  }

  // Visits all items with SCAN_FUNC if the resize lock can be taken (to avoid
  // concurrent resizes); otherwise returns false.
  template <typename SCAN_FUNC>
  bool try_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC once the resize lock has been obtained.
  template <typename SCAN_FUNC>
  void do_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC without any protection.
  // It assumes no other thread is accessing this table during the safepoint.
  // Must be called by the VM thread.
  template <typename SCAN_FUNC>
  void do_safepoint_scan(SCAN_FUNC& scan_f);
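
  // Hypothetical scan sketch; MyScan is an illustration only. The SCAN_FUNC
  // contract (it is applied to each stored VALUE, and its return value is
  // assumed to control whether the scan continues) is defined by the
  // implementation in the .inline.hpp file, not by this header.
  //
  //   MyScan scan_f;                       // e.g. counts or prints items.
  //   if (!table->try_scan(thread, scan_f)) {
  //     // A resize was in progress; either retry or fall back to do_scan(),
  //     // which waits for the resize lock.
  //   }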

  // Destroys items matching EVALUATE_FUNC, if the resize lock is obtained;
  // before an item is destroyed DELETE_FUNC is called on it. Otherwise
  // returns false.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f,
                       DELETE_FUNC& del_f);

  // Destroys items matching EVALUATE_FUNC; before an item is destroyed
  // DELETE_FUNC is called on it. Waits for the resize lock if necessary.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f);
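
  // Hypothetical bulk-delete sketch; IsDead and Reclaim are illustrations
  // only. EVALUATE_FUNC is assumed to return true for items that should be
  // removed, and DELETE_FUNC to receive a VALUE* before the node is
  // destroyed, matching the remove()/internal_remove() contract above.
  //
  //   IsDead  eval_f;    // bool operator()(VALUE*): true if item is dead.
  //   Reclaim del_f;     // void operator()(VALUE*): release the payload.
  //   table->bulk_delete(thread, eval_f, del_f);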

  // Calculate statistics. Item sizes are calculated with VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  TableStatistics statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f);

  // Writes statistics to the outputStream. Item sizes are calculated with
  // VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                     const char* table_name);

  // Moves all nodes from this table to to_cht.
  bool try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht);

  // This is a Curiously Recurring Template Pattern (CRTP) interface for the
  // specialization.
  struct BaseConfig {
   public:
    // Called when the hash table needs the hash for a VALUE.
    static uintx get_hash(const VALUE& value, bool* dead) {
      return CONFIG::get_hash(value, dead);
    }
    // Default node allocation.
    static void* allocate_node(size_t size, const VALUE& value);
    // Default node reclamation.
    static void free_node(void* memory, const VALUE& value);
  };
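
  // Hypothetical CONFIG sketch built on BaseConfig (MyValue and MyConfig are
  // illustrations only): the user type derives from BaseConfig to inherit the
  // default allocate_node/free_node and supplies its own get_hash, which
  // BaseConfig forwards to via CONFIG.
  //
  //   class MyConfig
  //     : public ConcurrentHashTable<MyValue*, MyConfig, mtInternal>::BaseConfig {
  //    public:
  //     static uintx get_hash(MyValue* const& value, bool* is_dead) {
  //       *is_dead = false;         // No dead-item support in this sketch.
  //       return value->hash();     // Assumes MyValue provides a hash.
  //     }
  //   };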

  // Scoped multi getter.
  class MultiGetHandle : private ScopedCS {
   public:
    MultiGetHandle(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
      : ScopedCS(thread, cht) {}
    // In the MultiGetHandle scope you can look up items matching LOOKUP_FUNC.
    // The VALUEs are safe as long as you never save the VALUEs outside the
    // scope, e.g. after ~MultiGetHandle().
    template <typename LOOKUP_FUNC>
    VALUE* get(LOOKUP_FUNC& lookup_f, bool* grow_hint = NULL);
  };
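
  // Hypothetical MultiGetHandle sketch, reusing the illustrative MyTable,
  // MyValue and MyLookup names from the sketches above: several lookups share
  // one critical section, and the returned VALUE pointers must not be saved
  // past the handle's scope.
  //
  //   {
  //     MyTable::MultiGetHandle mgh(thread, table);
  //     MyLookup l1(key1), l2(key2);
  //     MyValue** v1 = mgh.get(l1);   // VALUE* with VALUE == MyValue*.
  //     MyValue** v2 = mgh.get(l2);
  //     // Use v1/v2 only inside this scope.
  //   }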

 private:
  class BucketsOperation;

 public:
  class BulkDeleteTask;
  class GrowTask;
};

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP