src/hotspot/share/gc/shared/oopStorage.cpp
*** 142,157 ****
  size_t OopStorage::ActiveArray::block_count_acquire() const {
    return Atomic::load_acquire(&_block_count);
  }

  void OopStorage::ActiveArray::increment_refcount() const {
!   int new_value = Atomic::add(1, &_refcount);
    assert(new_value >= 1, "negative refcount %d", new_value - 1);
  }

  bool OopStorage::ActiveArray::decrement_refcount() const {
!   int new_value = Atomic::sub(1, &_refcount);
    assert(new_value >= 0, "negative refcount %d", new_value);
    return new_value == 0;
  }

  bool OopStorage::ActiveArray::push(Block* block) {
--- 142,157 ----
  size_t OopStorage::ActiveArray::block_count_acquire() const {
    return Atomic::load_acquire(&_block_count);
  }

  void OopStorage::ActiveArray::increment_refcount() const {
!   int new_value = Atomic::add(&_refcount, 1);
    assert(new_value >= 1, "negative refcount %d", new_value - 1);
  }

  bool OopStorage::ActiveArray::decrement_refcount() const {
!   int new_value = Atomic::sub(&_refcount, 1);
    assert(new_value >= 0, "negative refcount %d", new_value);
    return new_value == 0;
  }

  bool OopStorage::ActiveArray::push(Block* block) {
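Both sides of this hunk compute the post-update value of a reference count and assert on it; the only change is that the new Atomic::add/Atomic::sub calls take the destination pointer first and the addend second. A minimal standalone sketch of the same pattern, using std::atomic rather than HotSpot's Atomic class (the RefCounted name is illustrative):

// Standalone sketch, not HotSpot code: reference counting where the result of
// the atomic update is checked, as in ActiveArray above.
#include <atomic>
#include <cassert>

class RefCounted {
  mutable std::atomic<int> _refcount{0};
 public:
  void increment_refcount() const {
    // fetch_add returns the old value; adding 1 gives the new value, which is
    // what HotSpot's Atomic::add returns directly.
    int new_value = _refcount.fetch_add(1) + 1;
    assert(new_value >= 1 && "negative refcount");
  }
  bool decrement_refcount() const {
    int new_value = _refcount.fetch_sub(1) - 1;
    assert(new_value >= 0 && "negative refcount");
    return new_value == 0;  // True when the last reference was dropped.
  }
};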
*** 305,315 ****
    uintx allocated = allocated_bitmask();
    while (true) {
      assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
      unsigned index = count_trailing_zeros(~allocated);
      uintx new_value = allocated | bitmask_for_index(index);
!     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
      if (fetched == allocated) {
        return get_pointer(index); // CAS succeeded; return entry for index.
      }
      allocated = fetched; // CAS failed; retry with latest value.
    }
--- 305,315 ----
    uintx allocated = allocated_bitmask();
    while (true) {
      assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
      unsigned index = count_trailing_zeros(~allocated);
      uintx new_value = allocated | bitmask_for_index(index);
!     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
      if (fetched == allocated) {
        return get_pointer(index); // CAS succeeded; return entry for index.
      }
      allocated = fetched; // CAS failed; retry with latest value.
    }
--- 305,315 ----
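The allocate() loop above claims the lowest clear bit of the allocation bitmask with a compare-and-swap, retrying with the freshly fetched value on failure; the new Atomic::cmpxchg argument order is destination, compare value, exchange value. A standalone sketch of the same CAS retry loop with std::atomic (assumes C++20 for std::countr_zero; names are illustrative):

// Standalone sketch, not HotSpot code: claim the lowest clear bit of a bitmask
// with a CAS retry loop, mirroring Block::allocate() above.
#include <atomic>
#include <bit>
#include <cassert>
#include <cstdint>

unsigned claim_lowest_free_bit(std::atomic<uint64_t>& bitmask) {
  uint64_t allocated = bitmask.load();
  while (true) {
    assert(allocated != ~UINT64_C(0) && "attempt to allocate from full mask");
    unsigned index = std::countr_zero(~allocated);         // Lowest clear bit.
    uint64_t desired = allocated | (UINT64_C(1) << index);
    // On failure compare_exchange_weak reloads 'allocated' with the current
    // value, playing the role of the 'fetched' result above.
    if (bitmask.compare_exchange_weak(allocated, desired)) {
      return index;  // CAS succeeded; bit 'index' is now claimed.
    }
    // CAS failed; retry with the latest value already stored in 'allocated'.
  }
}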
*** 593,603 ****
    // Atomically update allocated bitmask.
    uintx old_allocated = _allocated_bitmask;
    while (true) {
      assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
      uintx new_value = old_allocated ^ releasing;
!     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
      if (fetched == old_allocated) break; // Successful update.
      old_allocated = fetched; // Retry with updated bitmask.
    }

    // Now that the bitmask has been updated, if we have a state transition
--- 593,603 ----
    // Atomically update allocated bitmask.
    uintx old_allocated = _allocated_bitmask;
    while (true) {
      assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
      uintx new_value = old_allocated ^ releasing;
!     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
      if (fetched == old_allocated) break; // Successful update.
      old_allocated = fetched; // Retry with updated bitmask.
    }

    // Now that the bitmask has been updated, if we have a state transition
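release_entries() clears a whole set of bits at once: because the assert guarantees every released bit is currently set, XOR-ing with the releasing mask clears exactly those bits, and the CAS loop retries if the bitmask moved underneath. The same idea as a standalone sketch with std::atomic (names are illustrative):

// Standalone sketch, not HotSpot code: clear a set of currently-set bits with
// XOR under a CAS retry loop, as in Block::release_entries() above.
#include <atomic>
#include <cassert>
#include <cstdint>

void release_bits(std::atomic<uint64_t>& bitmask, uint64_t releasing) {
  uint64_t old_allocated = bitmask.load();
  do {
    // Every bit being released must currently be set, so XOR clears them.
    assert((releasing & ~old_allocated) == 0 && "releasing unallocated entries");
  } while (!bitmask.compare_exchange_weak(old_allocated,
                                          old_allocated ^ releasing));
}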
*** 612,627 ****
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping. If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
!     if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
        // Successfully claimed. Push, with self-loop for end-of-list.
        Block* head = owner->_deferred_updates;
        while (true) {
          _deferred_updates_next = (head == NULL) ? this : head;
!         Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
          if (fetched == head) break; // Successful update.
          head = fetched; // Retry with updated head.
        }
        // Only request cleanup for to-empty transitions, not for from-full.
        // There isn't any rush to process from-full transitions. Allocation
--- 612,627 ----
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping. If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
!     if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
        // Successfully claimed. Push, with self-loop for end-of-list.
        Block* head = owner->_deferred_updates;
        while (true) {
          _deferred_updates_next = (head == NULL) ? this : head;
!         Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
          if (fetched == head) break; // Successful update.
          head = fetched; // Retry with updated head.
        }
        // Only request cleanup for to-empty transitions, not for from-full.
        // There isn't any rush to process from-full transitions. Allocation
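The block queues itself on the owner's deferred-update list in two steps: first it claims the right to be queued by CAS-ing its own link from NULL to a self-loop (so a queued block never has a NULL link), then it pushes itself onto the list with a CAS on the head, using the self-loop as the end-of-list marker. A standalone sketch of that claim-then-push protocol with std::atomic (Node, push_deferred, and list_head are illustrative names):

// Standalone sketch, not HotSpot code: claim a node by self-looping its link,
// then push it onto a lock-free list whose end is marked by a self-loop.
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

// Returns true if this call claimed and pushed the node; false means another
// thread already queued it, and its pending update will cover ours as well.
bool push_deferred(std::atomic<Node*>& list_head, Node* node) {
  Node* expected = nullptr;
  // Equivalent of Atomic::replace_if_null: only one thread wins the claim.
  if (!node->next.compare_exchange_strong(expected, node)) {
    return false;
  }
  Node* head = list_head.load();
  while (true) {
    // Self-loop marks end-of-list, so a queued node never has a null link.
    node->next.store((head == nullptr) ? node : head);
    if (list_head.compare_exchange_weak(head, node)) {
      return true;  // Successful push.
    }
    // CAS failed; 'head' now holds the latest list head, retry.
  }
}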
*** 649,659 ****
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
!     Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
      if (fetched == block) break; // Update successful.
      block = fetched; // Retry with updated block.
    }
    block->set_deferred_updates_next(NULL); // Clear tail after updating head.
    // Ensure bitmask read after pop is complete, including clearing tail, for
--- 649,659 ----
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
!     Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
      if (fetched == block) break; // Update successful.
      block = fetched; // Retry with updated block.
    }
    block->set_deferred_updates_next(NULL); // Clear tail after updating head.
    // Ensure bitmask read after pop is complete, including clearing tail, for
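The pop side translates the self-loop end marker back to NULL before swinging the list head, and only clears the popped block's link after the head update so concurrent pushers never observe a queued block with a NULL link. A standalone sketch with std::atomic, using the same Node type as the push sketch above (as in the original, it assumes pops are serialized by a lock, so there is no ABA hazard against concurrent pushes):

// Standalone sketch, not HotSpot code: pop one node from the deferred list,
// handling the self-loop end marker, as in reduce_deferred_updates() above.
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

Node* pop_deferred(std::atomic<Node*>& list_head) {
  Node* node = list_head.load(std::memory_order_acquire);
  while (true) {
    if (node == nullptr) return nullptr;        // List is empty.
    Node* tail = node->next.load();
    if (tail == node) tail = nullptr;           // Handle self-loop end marker.
    if (list_head.compare_exchange_weak(node, tail)) break;  // Popped.
    // CAS failed; 'node' now holds the latest head, retry.
  }
  node->next.store(nullptr);  // Clear link only after the head is updated.
  return node;
}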
*** 722,732 ****
        releasing |= entry_bitmask;
        ++count;
      }
      // Release the contiguous entries that are in block.
      block->release_entries(releasing, this);
!     Atomic::sub(count, &_allocation_count);
    }
  }

  const size_t initial_active_array_size = 8;
--- 722,732 ----
        releasing |= entry_bitmask;
        ++count;
      }
      // Release the contiguous entries that are in block.
      block->release_entries(releasing, this);
!     Atomic::sub(&_allocation_count, count);
    }
  }

  const size_t initial_active_array_size = 8;
*** 823,833 ****
      os::javaTimeNanos() + cleanup_trigger_defer_period;
    needs_cleanup_triggered = false;
    // Set the request flag false and return its old value.
    // Needs to be atomic to avoid dropping a concurrent request.
    // Can't use Atomic::xchg, which may not support bool.
!   return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
--- 823,833 ----
      os::javaTimeNanos() + cleanup_trigger_defer_period;
    needs_cleanup_triggered = false;
    // Set the request flag false and return its old value.
    // Needs to be atomic to avoid dropping a concurrent request.
    // Can't use Atomic::xchg, which may not support bool.
!   return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
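The reset uses a compare-and-exchange rather than an unconditional exchange because, as the comment notes, Atomic::xchg may not support bool on every platform; cmpxchg stores false only when the flag was true, and in either case its return value is the flag's previous value. A standalone sketch of that read-and-clear idiom with std::atomic<bool> (test_and_clear is an illustrative name; std::atomic does provide exchange(), the cmpxchg form is shown only for parity with the hunk):

// Standalone sketch, not HotSpot code: atomically read and clear a request
// flag using compare-and-exchange, mirroring the hunk above.
#include <atomic>

bool test_and_clear(std::atomic<bool>& requested) {
  bool expected = true;
  // Stores false only if the flag was true; on failure 'expected' is loaded
  // with the current value (false), so it always ends up as the old value.
  requested.compare_exchange_strong(expected, false);
  return expected;  // True if a cleanup request was pending.
}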
*** 1008,1018 ****
    size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
    // Atomic::add with possible overshoot. This can perform better
    // than a CAS loop on some platforms when there is contention.
    // We can cope with the uncertainty by recomputing start/end from
    // the result of the add, and dealing with potential overshoot.
!   size_t end = Atomic::add(step, &_next_block);
    // _next_block may have changed, so recompute start from result of add.
    start = end - step;
    // _next_block may have changed so much that end has overshot.
    end = MIN2(end, _block_count);
    // _next_block may have changed so much that even start has overshot.
--- 1008,1018 ----
    size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
    // Atomic::add with possible overshoot. This can perform better
    // than a CAS loop on some platforms when there is contention.
    // We can cope with the uncertainty by recomputing start/end from
    // the result of the add, and dealing with potential overshoot.
!   size_t end = Atomic::add(&_next_block, step);
    // _next_block may have changed, so recompute start from result of add.
    start = end - step;
    // _next_block may have changed so much that end has overshot.
    end = MIN2(end, _block_count);
    // _next_block may have changed so much that even start has overshot.
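Here a single Atomic::add claims a batch of block indices: the add may push _next_block past _block_count, but the claimer recomputes its start from the add's result and clamps both ends, trading a little overshoot for avoiding a CAS retry loop under contention. A standalone sketch of that overshoot-tolerant claiming with std::atomic (Claim, claim_range, next, and limit are illustrative names standing in for _next_block and _block_count):

// Standalone sketch, not HotSpot code: claim a half-open range [start, end)
// with one fetch_add that may overshoot the limit, then clamp, as above.
#include <algorithm>
#include <atomic>
#include <cstddef>

struct Claim { size_t start; size_t end; };

Claim claim_range(std::atomic<size_t>& next, size_t limit, size_t step) {
  // fetch_add returns the old value; old + step is the tentative end,
  // matching the new-value result of Atomic::add.
  size_t end = next.fetch_add(step) + step;
  size_t start = end - step;        // Recompute start from the add's result.
  end = std::min(end, limit);       // The end may have overshot the limit.
  start = std::min(start, limit);   // Even the start may have overshot.
  return Claim{start, end};         // Empty claim when start == end.
}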