/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/concurrentHashTable.inline.hpp"

// This inline file contains BulkDeleteTask and GrowTask. Both are bucket
// operations and are therefore serialized with each other.

// Base class for pausable and/or parallel bulk operations.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BucketsOperation {
 protected:
  ConcurrentHashTable<VALUE, CONFIG, F>* _cht;

  // Default value of _task_size_log2.
  static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;

  // The table is split into ranges; each increment of _next_to_claim claims one range.
  volatile size_t _next_to_claim;
  size_t _task_size_log2; // Log2 of the number of buckets per task.
  size_t _stop_task;      // Number of tasks (exclusive claim limit).
  size_t _size_log2;      // Log2 of the table size.

  BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
    _stop_task(0), _size_log2(0) {}

  // Returns true if the range [start, stop) was successfully claimed.
  bool claim(size_t* start, size_t* stop) {
    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
    if (claimed >= _stop_task) {
      return false;
    }
    *start = claimed * (((size_t)1) << _task_size_log2);
    *stop  = ((*start) + (((size_t)1) << _task_size_log2));
    return true;
  }

  // Calculate starting values.
  void setup() {
    _size_log2 = _cht->_table->_log2_size;
    _task_size_log2 = MIN2(_task_size_log2, _size_log2);
    size_t tmp = _size_log2 > _task_size_log2 ?
                 _size_log2 - _task_size_log2 : 0;
    _stop_task = (((size_t)1) << tmp);
  }
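
  // A worked example (illustrative numbers only, not taken from any caller):
  // with a table of 2^14 buckets and the default task size of 2^12 buckets,
  // setup() computes _stop_task = 2^(14 - 12) = 4, and successive claim()
  // calls hand out the bucket ranges [0, 4096), [4096, 8192), [8192, 12288)
  // and [12288, 16384), after which claim() returns false.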

  // Returns true while there are unclaimed ranges left.
  bool have_more_work() {
    return OrderAccess::load_acquire(&_next_to_claim) < _stop_task;
  }

  // Returns true if the table has changed size since setup().
  bool is_same_table() {
    // Comparing log2 sizes is not a complete check that it is the same
    // table, but it is sufficient for the callers in this file.
    return _size_log2 != _cht->_table->_log2_size;
  }

  // Assert helpers describing what the calling thread is expected to hold:
  // the resize lock mutex and/or the recorded owner state.
  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }
};

// For doing pausable/parallel bulk delete.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
  public BucketsOperation
{
 public:
  BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : BucketsOperation(cht) {
  }
  // prepare() must be called before the task is started.
  bool prepare(Thread* thread) {
    bool lock = BucketsOperation::_cht->try_resize_lock(thread);
    if (!lock) {
      return false;
    }
    this->setup();
    this->thread_owns_resize_lock(thread);
    return true;
  }

  // Processes one range, deleting every entry for which EVALUATE_FUNC returns
  // true; DELETE_FUNC is called before destruction. Returns true if there is
  // more work.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool doTask(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
                                                      eval_f, del_f);
    return true;
  }

  // Pauses this operation for a safepoint.
  void pause(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    // After this neither the resize lock nor the owner state is held;
    // cont() must reacquire them.
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }

  // Continues this operation after a safepoint. Returns false if the lock
  // could not be reacquired or the table changed size during the pause.
  bool cont(Thread* thread) {
    this->thread_do_not_own_resize_lock(thread);
    if (!BucketsOperation::_cht->try_resize_lock(thread)) {
      this->thread_do_not_own_resize_lock(thread);
      return false;
    }
    if (BucketsOperation::is_same_table()) {
      // The table changed size during the pause, so the claimed bucket
      // ranges no longer apply; give up the lock and abort.
      BucketsOperation::_cht->unlock_resize_lock(thread);
      this->thread_do_not_own_resize_lock(thread);
      return false;
    }
    this->thread_owns_resize_lock(thread);
    return true;
  }

  // Must be called after ranges are done.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
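
// A minimal driver sketch (illustrative only; the instantiation, table,
// thread, eval_f and del_f names below are assumptions, not part of this
// file). It shows the intended prepare/doTask/pause/cont/done protocol and
// respects the lock-ownership asserts above: done() is only called while the
// resize lock is still owned.
//
//   MyTableType::BulkDeleteTask bdt(table);         // hypothetical instantiation
//   if (bdt.prepare(thread)) {
//     bool aborted = false;
//     while (bdt.doTask(thread, eval_f, del_f)) {   // caller-supplied functors
//       bdt.pause(thread);
//       // ... the thread may block for a safepoint here ...
//       if (!bdt.cont(thread)) {
//         aborted = true;  // lock lost or table resized; ranges are stale
//         break;
//       }
//     }
//     if (!aborted) {
//       bdt.done(thread);
//     }
//   }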

// For doing pausable/parallel growth of the table.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::GrowTask :
  public BucketsOperation
{
 public:
  GrowTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht) : BucketsOperation(cht) {
  }
  // prepare() must be called before the task is started.
  bool prepare(Thread* thread) {
    if (!BucketsOperation::_cht->internal_grow_prolog(
          thread, BucketsOperation::_cht->_log2_size_limit)) {
      return false;
    }
    this->thread_owns_resize_lock(thread);
    BucketsOperation::setup();
    return true;
  }

  // Grows one claimed range of the table. Returns true if there is more work.
  bool doTask(Thread* thread) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->internal_grow_range(thread, start, stop);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Pauses growing for a safepoint.
  void pause(Thread* thread) {
    // Only the mutex is unlocked; the owner state stays set to this thread.
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->_resize_lock->unlock();
    this->thread_owns_only_state_lock(thread);
  }

  // Continues growing after a safepoint.
  void cont(Thread* thread) {
    this->thread_owns_only_state_lock(thread);
    // Spin in case someone slips in and grabs the mutex directly after the
    // safepoint.
    while (!BucketsOperation::_cht->_resize_lock->try_lock())
      { /* spin */ }
    this->thread_owns_resize_lock(thread);
  }

  // Must be called after doTask returns false.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->internal_grow_epilog(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
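
// A minimal driver sketch (illustrative only; the instantiation, table and
// thread names below are assumptions, not part of this file). Unlike
// BulkDeleteTask, cont() here cannot fail, so the loop always finishes with
// done().
//
//   MyTableType::GrowTask gt(table);           // hypothetical instantiation
//   if (gt.prepare(thread)) {
//     while (gt.doTask(thread)) {
//       gt.pause(thread);
//       // ... the thread may block for a safepoint here ...
//       gt.cont(thread);
//     }
//     gt.done(thread);
//   }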

#endif // SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP