src/hotspot/share/utilities/concurrentHashTable.inline.hpp
*** 56,75 ****
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Node::next() const
{
! return OrderAccess::load_acquire(&_next);
}
// Bucket
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Bucket::first_raw() const
{
! return OrderAccess::load_acquire(&_first);
}
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
Bucket::release_assign_node_ptr(
--- 56,75 ----
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Node::next() const
{
! return Atomic::load_acquire(&_next);
}
// Bucket
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Bucket::first_raw() const
{
! return Atomic::load_acquire(&_first);
}
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
Bucket::release_assign_node_ptr(
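The hunk above is a pure rename: OrderAccess::load_acquire becomes Atomic::load_acquire with identical acquire semantics. As a minimal sketch of what the acquire load buys a reader traversing the node chain, here is the same pattern spelled with std::atomic as a stand-in for HotSpot's Atomic (the Node type here is hypothetical):

  #include <atomic>

  struct Node {
    int value;
    std::atomic<Node*> next;
  };

  // Acquire-load of the link: everything the writer did before its
  // release-store to 'next' (e.g. filling in 'value') is guaranteed
  // visible to this reader after the load returns.
  Node* next_of(const Node* n) {
    return n->next.load(std::memory_order_acquire);
  }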
*** 77,96 ****
typename ConcurrentHashTable<CONFIG, F>::Node* node) const
{
// Due to this assert this method is not static.
assert(is_locked(), "Must be locked.");
Node** tmp = (Node**)dst;
! OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Bucket::first() const
{
// We strip the state bits before returning the ptr.
! return clear_state(OrderAccess::load_acquire(&_first));
}
template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
Bucket::have_redirect() const
--- 77,96 ----
typename ConcurrentHashTable<CONFIG, F>::Node* node) const
{
// Due to this assert this method is not static.
assert(is_locked(), "Must be locked.");
Node** tmp = (Node**)dst;
! Atomic::release_store(tmp, clear_set_state(node, *dst));
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Bucket::first() const
{
// We strip the state bits before returning the ptr.
! return clear_state(Atomic::load_acquire(&_first));
}
template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
Bucket::have_redirect() const
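release_assign_node_ptr and first() above rely on tag bits stored in the low bits of the first-node pointer. A sketch of that tagging scheme and the release-store publish, assuming std::atomic and at-least-4-byte alignment (the helper names mirror, but are not, the HotSpot ones):

  #include <atomic>
  #include <cstdint>

  using std::uintptr_t;

  struct Node { std::atomic<Node*> next{nullptr}; };

  static const uintptr_t STATE_LOCK_BIT     = 0x1;
  static const uintptr_t STATE_REDIRECT_BIT = 0x2;
  static const uintptr_t STATE_MASK         = 0x3;

  // Nodes are at least 4-byte aligned, so the two low bits of a real
  // pointer are always zero and free to carry the lock/redirect tags.
  static Node* clear_state(Node* ptr) {
    return (Node*)((uintptr_t)ptr & ~STATE_MASK);
  }

  // Combine a clean pointer with whatever tag bits 'old' carries.
  static Node* clear_set_state(Node* ptr, Node* old) {
    return (Node*)((uintptr_t)clear_state(ptr) | ((uintptr_t)old & STATE_MASK));
  }

  // Publish 'node' into '*dst', preserving the tag bits already there;
  // the release-store orders the node's initialization before the link.
  static void release_assign(std::atomic<Node*>* dst, Node* node) {
    dst->store(clear_set_state(node, dst->load(std::memory_order_relaxed)),
               std::memory_order_release);
  }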
*** 171,189 ****
Bucket::unlock()
{
assert(is_locked(), "Must be locked.");
assert(!have_redirect(),
"Unlocking a bucket after it has reached terminal state.");
! OrderAccess::release_store(&_first, clear_state(first()));
}
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
Bucket::redirect()
{
assert(is_locked(), "Must be locked.");
! OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
// InternalTable
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
--- 171,189 ----
Bucket::unlock()
{
assert(is_locked(), "Must be locked.");
assert(!have_redirect(),
"Unlocking a bucket after it has reached terminal state.");
! Atomic::release_store(&_first, clear_state(first()));
}
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
Bucket::redirect()
{
assert(is_locked(), "Must be locked.");
! Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
// InternalTable
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
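unlock() above is a release-store of _first with the state bits stripped, so every modification made while the bucket was locked becomes visible before the lock appears free. A compressed sketch of that path under the tagging scheme, again with std::atomic as a stand-in:

  #include <atomic>
  #include <cstdint>

  using std::uintptr_t;

  struct Node;
  static const uintptr_t STATE_LOCK_BIT = 0x1;

  struct Bucket {
    std::atomic<Node*> _first{nullptr};

    bool is_locked() const {
      return ((uintptr_t)_first.load(std::memory_order_acquire) & STATE_LOCK_BIT) != 0;
    }

    // Release-store with the lock bit cleared: all writes made while
    // the bucket was locked happen-before a later acquire-load of _first.
    void unlock() {
      Node* cur = _first.load(std::memory_order_relaxed);
      _first.store((Node*)((uintptr_t)cur & ~STATE_LOCK_BIT),
                   std::memory_order_release);
    }
  };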
*** 215,226 ****
: _thread(thread),
_cht(cht),
_cs_context(GlobalCounter::critical_section_begin(_thread))
{
// This version is published now.
! if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
! OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
}
}
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
--- 215,226 ----
: _thread(thread),
_cht(cht),
_cs_context(GlobalCounter::critical_section_begin(_thread))
{
// This version is published now.
! if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
! Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
}
}
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
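release_store_fence has no single std::atomic equivalent; the closest portable spelling is a release store followed by a full fence. A sketch of the reader-side handshake above, with a hypothetical global standing in for _invisible_epoch:

  #include <atomic>

  struct Thread;
  std::atomic<Thread*> invisible_epoch{nullptr};  // stand-in for _invisible_epoch

  // Reader side: a writer stores its Thread* here to mark an epoch as
  // not yet observed; any reader that enters zeroes the flag. The
  // trailing full fence approximates Atomic::release_store_fence.
  void publish_version_if_needed() {
    if (invisible_epoch.load(std::memory_order_acquire) != nullptr) {
      invisible_epoch.store(nullptr, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
  }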
*** 287,303 ****
write_synchonize_on_visible_epoch(Thread* thread)
{
assert(_resize_lock_owner == thread, "Re-size lock not held");
OrderAccess::fence(); // Prevent the load below from floating up.
// If no reader saw this version we can skip write_synchronize.
! if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
return;
}
assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
// We mark this/next version that we are synchronizing on as not published.
// A reader will zero this flag if it reads this/next version.
! OrderAccess::release_store(&_invisible_epoch, thread);
GlobalCounter::write_synchronize();
}
template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
--- 287,303 ----
write_synchonize_on_visible_epoch(Thread* thread)
{
assert(_resize_lock_owner == thread, "Re-size lock not held");
OrderAccess::fence(); // Prevent the load below from floating up.
// If no reader saw this version we can skip write_synchronize.
! if (Atomic::load_acquire(&_invisible_epoch) == thread) {
return;
}
assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
// We mark this/next version that we are synchronizing on as not published.
// A reader will zero this flag if it reads this/next version.
! Atomic::release_store(&_invisible_epoch, thread);
GlobalCounter::write_synchronize();
}
template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
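The writer side of the same handshake: the full fence keeps the acquire load from floating above earlier writes, and if no reader has cleared the flag since the last synchronize, the expensive write_synchronize can be skipped. A sketch, with a stub standing in for GlobalCounter::write_synchronize:

  #include <atomic>

  struct Thread;
  std::atomic<Thread*> invisible_epoch{nullptr};

  // Stub: the real GlobalCounter::write_synchronize() blocks until all
  // readers have left their critical sections.
  static void write_synchronize() {}

  void synchronize_on_visible_epoch(Thread* self) {
    // Full fence: order our earlier table writes before the load below.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Flag still carries our Thread*: no reader entered since our last
    // synchronize, so no reader can still see the old version. Skip.
    if (invisible_epoch.load(std::memory_order_acquire) == self) {
      return;
    }
    // Mark this version as not published; an entering reader zeroes it.
    invisible_epoch.store(self, std::memory_order_release);
    write_synchronize();
  }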
*** 372,400 ****
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_table() const
{
! return OrderAccess::load_acquire(&_table);
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_new_table() const
{
! return OrderAccess::load_acquire(&_new_table);
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
set_table_from_new()
{
InternalTable* old_table = _table;
// Publish the new table.
! OrderAccess::release_store(&_table, _new_table);
// All must see this.
GlobalCounter::write_synchronize();
// _new_table is not read any more.
_new_table = NULL;
DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
--- 372,400 ----
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_table() const
{
! return Atomic::load_acquire(&_table);
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_new_table() const
{
! return Atomic::load_acquire(&_new_table);
}
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
set_table_from_new()
{
InternalTable* old_table = _table;
// Publish the new table.
! Atomic::release_store(&_table, _new_table);
// All must see this.
GlobalCounter::write_synchronize();
// _new_table is not read any more.
_new_table = NULL;
DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
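set_table_from_new() is the classic publish-then-quiesce pattern: release-store the new table so any reader that sees it also sees its initialization, wait for all pre-existing readers to leave, and only then retire the old table. A sketch under those assumptions:

  #include <atomic>
  #include <cstddef>

  struct InternalTable { std::size_t log2_size; };

  std::atomic<InternalTable*> table{nullptr};
  InternalTable* new_table = nullptr;

  // Stub for GlobalCounter::write_synchronize(): waits until every
  // reader that could hold the old table pointer has left.
  static void write_synchronize() {}

  InternalTable* set_table_from_new_sketch() {
    InternalTable* old_table = table.load(std::memory_order_relaxed);
    // Release-store: *new_table's initialization happens-before any
    // reader's acquire-load of 'table'.
    table.store(new_table, std::memory_order_release);
    write_synchronize();  // quiesce: no reader still holds old_table
    new_table = nullptr;  // the field is not read after the swap
    return old_table;     // caller may now reuse or free it
  }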