< prev index next >

src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp

Print this page




  57     if (claimed >= _stop_task) {
  58       return false;
  59     }
  60     *start = claimed * (((size_t)1) << _task_size_log2);
  61     *stop  = ((*start) + (((size_t)1) << _task_size_log2));
  62     return true;
  63   }
  64 
  65   // Calculate starting values.
  66   void setup(Thread* thread) {
  67     thread_owns_resize_lock(thread);
  68     _size_log2 = _cht->_table->_log2_size;
  69     _task_size_log2 = MIN2(_task_size_log2, _size_log2);
  70     size_t tmp = _size_log2 > _task_size_log2 ?
  71                  _size_log2 - _task_size_log2 : 0;
  72     _stop_task = (((size_t)1) << tmp);
  73   }
  74 
  75   // Returns false if all ranges are claimed.
  76   bool have_more_work() {
  77     return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
  78   }
  79 
  // Debug check: 'thread' must hold the resize lock in full -- it is both
  // recorded as the logical owner and holds the underlying mutex itself.
  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  // Debug check: 'thread' is recorded as the logical owner of the resize
  // state while the underlying mutex itself is NOT held -- the intermediate
  // state where the operation is claimed but the lock has been released.
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  // Debug check: 'thread' holds neither the resize mutex nor the logical
  // ownership record -- it has no claim on the resize state at all.
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }




  57     if (claimed >= _stop_task) {
  58       return false;
  59     }
  60     *start = claimed * (((size_t)1) << _task_size_log2);
  61     *stop  = ((*start) + (((size_t)1) << _task_size_log2));
  62     return true;
  63   }
  64 
  65   // Calculate starting values.
  66   void setup(Thread* thread) {
  67     thread_owns_resize_lock(thread);
  68     _size_log2 = _cht->_table->_log2_size;
  69     _task_size_log2 = MIN2(_task_size_log2, _size_log2);
  70     size_t tmp = _size_log2 > _task_size_log2 ?
  71                  _size_log2 - _task_size_log2 : 0;
  72     _stop_task = (((size_t)1) << tmp);
  73   }
  74 
  // Returns false if all ranges are claimed.
  // NOTE(review): the expression yields true once _next_to_claim has reached
  // _stop_task, i.e. when every range HAS been claimed -- which appears to be
  // the opposite of both the name and the comment above. Verify the intended
  // polarity against the callers. The acquire-ordered load observes claims
  // published by other worker threads.
  bool have_more_work() {
    return Atomic::load_acquire(&_next_to_claim) >= _stop_task;
  }
  79 
  // Debug check: 'thread' must hold the resize lock in full -- it is both
  // recorded as the logical owner and holds the underlying mutex itself.
  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  // Debug check: 'thread' is recorded as the logical owner of the resize
  // state while the underlying mutex itself is NOT held -- the intermediate
  // state where the operation is claimed but the lock has been released.
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  // Debug check: 'thread' holds neither the resize mutex nor the logical
  // ownership record -- it has no claim on the resize state at all.
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }


< prev index next >