src/hotspot/share/gc/shared/oopStorage.cpp


*** 305,315 ****
    uintx allocated = allocated_bitmask();
    while (true) {
      assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
      unsigned index = count_trailing_zeros(~allocated);
      uintx new_value = allocated | bitmask_for_index(index);
!     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
      if (fetched == allocated) {
        return get_pointer(index); // CAS succeeded; return entry for index.
      }
      allocated = fetched; // CAS failed; retry with latest value.
    }
--- 305,315 ----
    uintx allocated = allocated_bitmask();
    while (true) {
      assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
      unsigned index = count_trailing_zeros(~allocated);
      uintx new_value = allocated | bitmask_for_index(index);
!     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
      if (fetched == allocated) {
        return get_pointer(index); // CAS succeeded; return entry for index.
      }
      allocated = fetched; // CAS failed; retry with latest value.
    }

*** 593,603 ****
    // Atomically update allocated bitmask.
    uintx old_allocated = _allocated_bitmask;
    while (true) {
      assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
      uintx new_value = old_allocated ^ releasing;
!     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
      if (fetched == old_allocated) break; // Successful update.
      old_allocated = fetched; // Retry with updated bitmask.
    }

    // Now that the bitmask has been updated, if we have a state transition
--- 593,603 ----
    // Atomically update allocated bitmask.
    uintx old_allocated = _allocated_bitmask;
    while (true) {
      assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
      uintx new_value = old_allocated ^ releasing;
!     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
      if (fetched == old_allocated) break; // Successful update.
      old_allocated = fetched; // Retry with updated bitmask.
    }

    // Now that the bitmask has been updated, if we have a state transition

*** 612,627 ****
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping. If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
!     if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
        // Successfully claimed. Push, with self-loop for end-of-list.
        Block* head = owner->_deferred_updates;
        while (true) {
          _deferred_updates_next = (head == NULL) ? this : head;
!         Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
          if (fetched == head) break; // Successful update.
          head = fetched; // Retry with updated head.
        }
        // Only request cleanup for to-empty transitions, not for from-full.
        // There isn't any rush to process from-full transitions. Allocation
--- 612,627 ----
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping. If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
!     if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
        // Successfully claimed. Push, with self-loop for end-of-list.
        Block* head = owner->_deferred_updates;
        while (true) {
          _deferred_updates_next = (head == NULL) ? this : head;
!         Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
          if (fetched == head) break; // Successful update.
          head = fetched; // Retry with updated head.
        }
        // Only request cleanup for to-empty transitions, not for from-full.
        // There isn't any rush to process from-full transitions. Allocation

*** 649,659 ****
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
!     Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
      if (fetched == block) break; // Update successful.
      block = fetched; // Retry with updated block.
    }
    block->set_deferred_updates_next(NULL); // Clear tail after updating head.
    // Ensure bitmask read after pop is complete, including clearing tail, for
--- 649,659 ----
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
!     Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
      if (fetched == block) break; // Update successful.
      block = fetched; // Retry with updated block.
    }
    block->set_deferred_updates_next(NULL); // Clear tail after updating head.
    // Ensure bitmask read after pop is complete, including clearing tail, for

*** 823,833 ****
      os::javaTimeNanos() + cleanup_trigger_defer_period;
    needs_cleanup_triggered = false;
    // Set the request flag false and return its old value.
    // Needs to be atomic to avoid dropping a concurrent request.
    // Can't use Atomic::xchg, which may not support bool.
!   return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
--- 823,833 ----
      os::javaTimeNanos() + cleanup_trigger_defer_period;
    needs_cleanup_triggered = false;
    // Set the request flag false and return its old value.
    // Needs to be atomic to avoid dropping a concurrent request.
    // Can't use Atomic::xchg, which may not support bool.
!   return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
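
All of the hunks above make the same mechanical change: the destination pointer moves to the first argument of Atomic::cmpxchg and Atomic::replace_if_null, giving the order (dest, compare_value, exchange_value). As a rough illustration of the retry pattern these call sites use, here is a minimal standalone sketch of the Block::allocate() loop written with standard C++ std::atomic instead of HotSpot's Atomic class; the names claim_bit and bitmask_t are invented for this example and are not part of OopStorage.

// Minimal sketch, not HotSpot code: the same "claim the lowest clear bit"
// retry loop as Block::allocate() above, expressed with std::atomic.
#include <atomic>
#include <cstdint>
#include <cstdio>

typedef uint64_t bitmask_t;

// Hypothetical helper: returns the index of the bit claimed,
// or -1 if the mask was already full.
static int claim_bit(std::atomic<bitmask_t>& allocated) {
  bitmask_t old_value = allocated.load();
  while (true) {
    if (~old_value == 0) return -1;              // No clear bit left.
    int index = 0;
    while ((old_value >> index) & 1) ++index;    // Find lowest clear bit.
    bitmask_t new_value = old_value | (bitmask_t(1) << index);
    // compare_exchange_weak plays the role of
    // Atomic::cmpxchg(&dest, compare_value, exchange_value); on failure it
    // reloads old_value, which is the "retry with latest value" step.
    if (allocated.compare_exchange_weak(old_value, new_value)) {
      return index;                              // CAS succeeded.
    }
  }
}

int main() {
  std::atomic<bitmask_t> mask(0);
  std::printf("claimed %d\n", claim_bit(mask));  // prints 0
  std::printf("claimed %d\n", claim_bit(mask));  // prints 1
  return 0;
}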