< prev index next >
src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
Print this page
rev 50787 : 8205583: Crash in ConcurrentHashTable do_bulk_delete_locked_for
Reviewed-by:
rev 50788 : [mq]: 8205583_gerard
*** 61,71 ****
*stop = ((*start) + (((size_t)1) << _task_size_log2));
return true;
}
// Calculate starting values.
! void setup() {
_size_log2 = _cht->_table->_log2_size;
_task_size_log2 = MIN2(_task_size_log2, _size_log2);
size_t tmp = _size_log2 > _task_size_log2 ?
_size_log2 - _task_size_log2 : 0;
_stop_task = (((size_t)1) << tmp);
--- 61,72 ----
*stop = ((*start) + (((size_t)1) << _task_size_log2));
return true;
}
// Calculate starting values.
! void setup(Thread* thread) {
! thread_owns_resize_lock(thread);
_size_log2 = _cht->_table->_log2_size;
_task_size_log2 = MIN2(_task_size_log2, _size_log2);
size_t tmp = _size_log2 > _task_size_log2 ?
_size_log2 - _task_size_log2 : 0;
_stop_task = (((size_t)1) << tmp);
*** 74,89 ****
// Returns false if all ranges are claimed.
bool have_more_work() {
return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
}
- // If we have changed size.
- bool is_same_table() {
- // Not entirely true.
- return _size_log2 != _cht->_table->_log2_size;
- }
-
void thread_owns_resize_lock(Thread* thread) {
assert(BucketsOperation::_cht->_resize_lock_owner == thread,
"Should be locked by me");
assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
"Operations lock not held");
--- 75,84 ----
*** 98,107 ****
--- 93,120 ----
assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
"Operations lock held");
assert(BucketsOperation::_cht->_resize_lock_owner != thread,
"Should not be locked by me");
}
+
+ public:
+ // Pauses for safepoint
+ void pause(Thread* thread) {
+ // This leaves internal state locked.
+ this->thread_owns_resize_lock(thread);
+ BucketsOperation::_cht->_resize_lock->unlock();
+ this->thread_owns_only_state_lock(thread);
+ }
+
+ // Continues after safepoint.
+ void cont(Thread* thread) {
+ this->thread_owns_only_state_lock(thread);
+ // If someone slips in here directly after safepoint.
+ while (!BucketsOperation::_cht->_resize_lock->try_lock())
+ { /* for ever */ };
+ this->thread_owns_resize_lock(thread);
+ }
};
// For doing pausable/parallel bulk delete.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
*** 115,126 ****
bool prepare(Thread* thread) {
bool lock = BucketsOperation::_cht->try_resize_lock(thread);
if (!lock) {
return false;
}
! this->setup();
! this->thread_owns_resize_lock(thread);
return true;
}
// Does one range destroying all matching EVALUATE_FUNC and
// DELETE_FUNC is called before destruction. Returns true if there is more work.
--- 128,138 ----
bool prepare(Thread* thread) {
bool lock = BucketsOperation::_cht->try_resize_lock(thread);
if (!lock) {
return false;
}
! this->setup(thread);
return true;
}
// Does one range destroying all matching EVALUATE_FUNC and
// DELETE_FUNC is called before destruction. Returns true if there is more work.
*** 133,166 ****
return false;
}
BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
eval_f, del_f,
BucketsOperation::_is_mt);
! return true;
! }
!
! // Pauses this operations for a safepoint.
! void pause(Thread* thread) {
! this->thread_owns_resize_lock(thread);
! // This leaves internal state locked.
! BucketsOperation::_cht->unlock_resize_lock(thread);
! this->thread_do_not_own_resize_lock(thread);
! }
!
! // Continues this operations after a safepoint.
! bool cont(Thread* thread) {
! this->thread_do_not_own_resize_lock(thread);
! if (!BucketsOperation::_cht->try_resize_lock(thread)) {
! this->thread_do_not_own_resize_lock(thread);
! return false;
! }
! if (BucketsOperation::is_same_table()) {
! BucketsOperation::_cht->unlock_resize_lock(thread);
! this->thread_do_not_own_resize_lock(thread);
! return false;
! }
! this->thread_owns_resize_lock(thread);
return true;
}
// Must be called after ranges are done.
void done(Thread* thread) {
--- 145,156 ----
return false;
}
BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
eval_f, del_f,
BucketsOperation::_is_mt);
! assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
! "Should be locked");
return true;
}
// Must be called after ranges are done.
void done(Thread* thread) {
*** 181,192 ****
bool prepare(Thread* thread) {
if (!BucketsOperation::_cht->internal_grow_prolog(
thread, BucketsOperation::_cht->_log2_size_limit)) {
return false;
}
! this->thread_owns_resize_lock(thread);
! BucketsOperation::setup();
return true;
}
// Re-sizes a portion of the table. Returns true if there is more work.
bool do_task(Thread* thread) {
--- 171,181 ----
bool prepare(Thread* thread) {
if (!BucketsOperation::_cht->internal_grow_prolog(
thread, BucketsOperation::_cht->_log2_size_limit)) {
return false;
}
! this->setup(thread);
return true;
}
// Re-sizes a portion of the table. Returns true if there is more work.
bool do_task(Thread* thread) {
*** 200,226 ****
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
"Should be locked");
return true;
}
- // Pauses growing for safepoint
- void pause(Thread* thread) {
- // This leaves internal state locked.
- this->thread_owns_resize_lock(thread);
- BucketsOperation::_cht->_resize_lock->unlock();
- this->thread_owns_only_state_lock(thread);
- }
-
- // Continues growing after safepoint.
- void cont(Thread* thread) {
- this->thread_owns_only_state_lock(thread);
- // If someone slips in here directly after safepoint.
- while (!BucketsOperation::_cht->_resize_lock->try_lock())
- { /* for ever */ };
- this->thread_owns_resize_lock(thread);
- }
-
// Must be called after do_task returns false.
void done(Thread* thread) {
this->thread_owns_resize_lock(thread);
BucketsOperation::_cht->internal_grow_epilog(thread);
this->thread_do_not_own_resize_lock(thread);
--- 189,198 ----
< prev index next >