/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP

#include "utilities/concurrentHashTable.inline.hpp"

// This inline file contains BulkDeleteTask and GrowTask, which are both
// bucket operations; they are serialized with each other.

// Base class for pausable and/or parallel bulk operations.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BucketsOperation {
 protected:
  ConcurrentHashTable<VALUE, CONFIG, F>* _cht;

  // Default value of _task_size_log2.
  static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;

  // The table is split into ranges; every claimed increment of _next_to_claim
  // corresponds to one range of buckets.
  volatile size_t _next_to_claim;
  size_t _task_size_log2; // Log2 of the number of buckets per task.
  size_t _stop_task;      // Number of tasks.
  size_t _size_log2;      // Table size (log2), captured in setup().

  BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
    _stop_task(0), _size_log2(0) {}

  // Returns true if a range was successfully claimed; the claimed range is
  // start -> (stop-1).
  bool claim(size_t* start, size_t* stop) {
    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
    if (claimed >= _stop_task) {
      return false;
    }
    *start = claimed * (((size_t)1) << _task_size_log2);
    *stop  = ((*start) + (((size_t)1) << _task_size_log2));
    return true;
  }

  // Calculate starting values.
  void setup() {
    _size_log2 = _cht->_table->_log2_size;
    size_t tmp = _size_log2 > _task_size_log2 ?
                 _size_log2 - _task_size_log2 : 0;
    _stop_task = (((size_t)1) << tmp);
  }
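
  // Worked example (illustrative): with the default _task_size_log2 of 12 and
  // a table of 2^16 buckets, setup() computes _stop_task = 2^(16 - 12) = 16
  // tasks, and each successful claim() hands out 2^12 = 4096 buckets; for
  // example, claiming task index 3 yields *start = 12288 and *stop = 16384,
  // i.e. buckets 12288..16383.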

  // Returns true if all ranges have been claimed.
  bool have_more_work() {
    return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
  }

  // Returns true if the table has changed size since setup() was called.
  bool is_same_table() {
    // Not entirely accurate: the table may have changed size and then
    // changed back to the same size.
    return _size_log2 != _cht->_table->_log2_size;
  }

  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }
};

// For doing a pausable/parallel bulk delete.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
  public BucketsOperation
{
 public:
  BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : BucketsOperation(cht) {
  }
  // prepare() must be called before the task is started.
  bool prepare(Thread* thread) {
    bool lock = BucketsOperation::_cht->try_resize_lock(thread);
    if (!lock) {
      return false;
    }
    this->setup();
    this->thread_owns_resize_lock(thread);
    return true;
  }

  // Processes one range, deleting every item for which EVALUATE_FUNC returns
  // true; DELETE_FUNC is called on each matching item before it is destroyed.
  // Returns true if there is more work.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool doTask(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
                                                      eval_f, del_f);
    return true;
  }

  // Pauses this operation for a safepoint.
  void pause(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    // This releases the resize lock completely; cont() must reacquire it.
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }

  // Continues this operation after a safepoint. Returns false if the
  // operation cannot be resumed (the lock could not be retaken or the table
  // changed size).
  bool cont(Thread* thread) {
    this->thread_do_not_own_resize_lock(thread);
    if (!BucketsOperation::_cht->try_resize_lock(thread)) {
      this->thread_do_not_own_resize_lock(thread);
      return false;
    }
    if (BucketsOperation::is_same_table()) {
      // The table changed size during the pause; the operation cannot be
      // resumed.
      BucketsOperation::_cht->unlock_resize_lock(thread);
      this->thread_do_not_own_resize_lock(thread);
      return false;
    }
    this->thread_owns_resize_lock(thread);
    return true;
  }

  // Must be called after all ranges are done.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
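
// Illustrative usage sketch (not compiled; kept as a comment). It shows the
// intended driver loop for BulkDeleteTask, based only on the methods above.
// The names MyTableHash, table, thread, IsDeadFunc and DeleteFunc are
// hypothetical placeholders for a concrete ConcurrentHashTable instantiation
// and its caller-supplied functors; how the caller blocks for a safepoint
// between pause() and cont() is left out.
//
//   MyTableHash* table = ...;
//   MyTableHash::BulkDeleteTask bdt(table);
//   if (!bdt.prepare(thread)) {
//     return; // Resize lock not available, retry later.
//   }
//   IsDeadFunc is_dead;   // EVALUATE_FUNC: true for items to delete.
//   DeleteFunc do_delete; // DELETE_FUNC: called before an item is destroyed.
//   while (bdt.doTask(thread, is_dead, do_delete)) {
//     bdt.pause(thread);
//     // ... allow a safepoint here ...
//     if (!bdt.cont(thread)) {
//       return; // Could not resume (lock taken or table changed size).
//     }
//   }
//   bdt.done(thread);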

template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::GrowTask :
  public BucketsOperation
{
 public:
  GrowTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht) : BucketsOperation(cht) {
  }
  // prepare() must be called before the task is started.
  bool prepare(Thread* thread) {
    if (!BucketsOperation::_cht->internal_grow_prolog(
          thread, BucketsOperation::_cht->_log2_size_limit)) {
      return false;
    }
    this->thread_owns_resize_lock(thread);
    BucketsOperation::setup();
    return true;
  }

  // Resizes a portion of the table. Returns true if there is more work.
  bool doTask(Thread* thread) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->internal_grow_range(thread, start, stop);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Pauses growing for a safepoint.
  void pause(Thread* thread) {
    // This leaves the internal state locked: only the mutex is released,
    // while _resize_lock_owner remains this thread.
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->_resize_lock->unlock();
    this->thread_owns_only_state_lock(thread);
  }

  // Continues growing after a safepoint.
  void cont(Thread* thread) {
    this->thread_owns_only_state_lock(thread);
    // If another thread grabs the mutex directly after the safepoint, spin
    // until it is released.
    while (!BucketsOperation::_cht->_resize_lock->try_lock())
      { /* spin */ }
    this->thread_owns_resize_lock(thread);
  }

  // Must be called after doTask returns false.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->internal_grow_epilog(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
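
// Illustrative usage sketch (not compiled; kept as a comment). It shows the
// intended driver loop for GrowTask, based only on the methods above; the
// names MyTableHash, table and thread are hypothetical placeholders, and
// blocking for a safepoint between pause() and cont() is left to the caller.
// Unlike BulkDeleteTask, GrowTask keeps the internal resize state locked
// across pause(), so cont() does not fail and returns no status.
//
//   MyTableHash* table = ...;
//   MyTableHash::GrowTask gt(table);
//   if (!gt.prepare(thread)) {
//     return; // Another resize is in progress or the size limit is reached.
//   }
//   while (gt.doTask(thread)) {
//     gt.pause(thread);
//     // ... allow a safepoint here ...
//     gt.cont(thread);
//   }
//   gt.done(thread);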

#endif // SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP