src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp

  36 class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
  37  protected:
  38   ConcurrentHashTable<CONFIG, F>* _cht;
  39 
  40   // Default size of _task_size_log2
  41   static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;
  42 
  43   // The table is split into ranges, every increment is one range.
  44   volatile size_t _next_to_claim;
  45   size_t _task_size_log2; // Number of buckets.
  46   size_t _stop_task;      // Last task
  47   size_t _size_log2;      // Table size.
  48   bool   _is_mt;
  49 
  50   BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
  51     : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
  52     _stop_task(0), _size_log2(0), _is_mt(is_mt) {}
  53 
  54   // Returns true if you succeeded to claim the range start -> (stop-1).
  55   bool claim(size_t* start, size_t* stop) {
  56     size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
  57     if (claimed >= _stop_task) {
  58       return false;
  59     }
  60     *start = claimed * (((size_t)1) << _task_size_log2);
  61     *stop  = ((*start) + (((size_t)1) << _task_size_log2));
  62     return true;
  63   }
  64 
  65   // Calculate starting values.
  66   void setup(Thread* thread) {
  67     thread_owns_resize_lock(thread);
  68     _size_log2 = _cht->_table->_log2_size;
  69     _task_size_log2 = MIN2(_task_size_log2, _size_log2);
  70     size_t tmp = _size_log2 > _task_size_log2 ?
  71                  _size_log2 - _task_size_log2 : 0;
  72     _stop_task = (((size_t)1) << tmp);
  73   }
  74 
  75   // Returns false if all ranges are claimed.
  76   bool have_more_work() {
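
(The second copy of the excerpt below is identical apart from line 56, where the Atomic::add call switches to the destination-first argument order.)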




  36 class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
  37  protected:
  38   ConcurrentHashTable<CONFIG, F>* _cht;
  39 
  40   // Default size of _task_size_log2
  41   static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;
  42 
  43   // The table is split into ranges, every increment is one range.
  44   volatile size_t _next_to_claim;
  45   size_t _task_size_log2; // Number of buckets.
  46   size_t _stop_task;      // Last task
  47   size_t _size_log2;      // Table size.
  48   bool   _is_mt;
  49 
  50   BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
  51     : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
  52     _stop_task(0), _size_log2(0), _is_mt(is_mt) {}
  53 
  54   // Returns true if you succeeded to claim the range start -> (stop-1).
  55   bool claim(size_t* start, size_t* stop) {
  56     size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
  57     if (claimed >= _stop_task) {
  58       return false;
  59     }
  60     *start = claimed * (((size_t)1) << _task_size_log2);
  61     *stop  = ((*start) + (((size_t)1) << _task_size_log2));
  62     return true;
  63   }
  64 
  65   // Calculate starting values.
  66   void setup(Thread* thread) {
  67     thread_owns_resize_lock(thread);
  68     _size_log2 = _cht->_table->_log2_size;
  69     _task_size_log2 = MIN2(_task_size_log2, _size_log2);
  70     size_t tmp = _size_log2 > _task_size_log2 ?
  71                  _size_log2 - _task_size_log2 : 0;
  72     _stop_task = (((size_t)1) << tmp);
  73   }
  74 
  75   // Returns false if all ranges are claimed.
  76   bool have_more_work() {
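For readers outside HotSpot, the claiming scheme above is easy to reproduce in isolation. The sketch below mirrors claim() and setup() using std::atomic rather than HotSpot's Atomic::add; the RangeClaimer type, the worker lambda, and the four-thread driver are illustrative assumptions, not part of this file or of the change under review.

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Illustrative stand-in for BucketsOperation's range claiming (not HotSpot code).
    struct RangeClaimer {
      std::atomic<size_t> _next_to_claim{0};
      size_t _task_size_log2;  // log2 of buckets covered by one claimed range
      size_t _stop_task;       // total number of ranges

      explicit RangeClaimer(size_t size_log2, size_t task_size_log2 = 12) {
        _task_size_log2 = std::min(task_size_log2, size_log2);  // same clamp as setup()
        _stop_task = size_t(1) << (size_log2 - _task_size_log2);
      }

      // Same shape as claim(): a fetch-and-add hands out consecutive range indexes;
      // once the pre-increment value reaches _stop_task, every caller gets false.
      bool claim(size_t* start, size_t* stop) {
        size_t claimed = _next_to_claim.fetch_add(1);  // returns the pre-increment value
        if (claimed >= _stop_task) {
          return false;
        }
        *start = claimed << _task_size_log2;
        *stop  = *start + (size_t(1) << _task_size_log2);
        return true;
      }
    };

    int main() {
      RangeClaimer claimer(16);            // 2^16 buckets split into 2^12-bucket ranges
      std::atomic<size_t> buckets_seen{0};
      auto worker = [&]() {
        size_t start, stop;
        while (claimer.claim(&start, &stop)) {
          buckets_seen += stop - start;    // stand-in for visiting buckets [start, stop)
        }
      };
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; i++) {
        threads.emplace_back(worker);
      }
      for (std::thread& t : threads) {
        t.join();
      }
      std::printf("claimed %zu buckets across %zu ranges\n",
                  buckets_seen.load(), claimer._stop_task);
      return 0;
    }

Because each range index is handed out by the counter exactly once, every bucket is visited exactly once no matter how many threads call claim() concurrently or how they interleave.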

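In the HotSpot code itself, setup() does the equivalent arithmetic: with a table of 2^16 buckets (_size_log2 = 16) and the default DEFAULT_TASK_SIZE_LOG2 of 12, _stop_task becomes 2^(16-12) = 16, i.e. sixteen ranges of 4096 buckets each; for a table smaller than 2^12 buckets, _task_size_log2 is clamped down to _size_log2 and the whole table is processed as a single range.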
