src/hotspot/share/gc/shared/oopStorage.cpp

*** 138,148 ****
  size_t OopStorage::ActiveArray::block_count() const {
    return _block_count;
  }

  size_t OopStorage::ActiveArray::block_count_acquire() const {
!   return OrderAccess::load_acquire(&_block_count);
  }

  void OopStorage::ActiveArray::increment_refcount() const {
    int new_value = Atomic::add(1, &_refcount);
    assert(new_value >= 1, "negative refcount %d", new_value - 1);
--- 138,148 ----
  size_t OopStorage::ActiveArray::block_count() const {
    return _block_count;
  }

  size_t OopStorage::ActiveArray::block_count_acquire() const {
!   return Atomic::load_acquire(&_block_count);
  }

  void OopStorage::ActiveArray::increment_refcount() const {
    int new_value = Atomic::add(1, &_refcount);
    assert(new_value >= 1, "negative refcount %d", new_value - 1);
*** 159,169 ****
    if (index < _size) {
      block->set_active_index(index);
      *block_ptr(index) = block;
      // Use a release_store to ensure all the setup is complete before
      // making the block visible.
!     OrderAccess::release_store(&_block_count, index + 1);
      return true;
    } else {
      return false;
    }
  }
--- 159,169 ----
    if (index < _size) {
      block->set_active_index(index);
      *block_ptr(index) = block;
      // Use a release_store to ensure all the setup is complete before
      // making the block visible.
!     Atomic::release_store(&_block_count, index + 1);
      return true;
    } else {
      return false;
    }
  }
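Note: in this hunk only the spelling of the primitive changes (OrderAccess::release_store becomes Atomic::release_store); the idiom stays the same. ActiveArray::push() fills the slot first and then publishes it by release-storing the incremented count, which pairs with the acquire load in block_count_acquire() above, so a reader that observes the new count also observes the fully initialized slot. A minimal standalone sketch of that idiom, using std::atomic rather than HotSpot's Atomic class (all names below are hypothetical, and the single-writer assumption mirrors push() being serialized under the allocation mutex):

  #include <atomic>
  #include <cstddef>

  struct PublishedArray {
    static const size_t SIZE = 16;
    void* _slots[SIZE];
    std::atomic<size_t> _count{0};

    // Single writer: fill the slot, then publish it with a release store.
    bool push(void* entry) {
      size_t index = _count.load(std::memory_order_relaxed);
      if (index >= SIZE) return false;
      _slots[index] = entry;                               // set up the slot first
      _count.store(index + 1, std::memory_order_release);  // then make it visible
      return true;
    }

    // Readers: an acquire load pairs with the release store above, so any
    // slot at index < count_acquire() is safe to read.
    size_t count_acquire() const {
      return _count.load(std::memory_order_acquire);
    }
  };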
*** 262,273 ****
  // For interaction with release(), these must follow the empty check,
  // and the order of these checks is important.
  bool OopStorage::Block::is_safe_to_delete() const {
    assert(is_empty(), "precondition");
    OrderAccess::loadload();
!   return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
!          (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
  }

  OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
    return _deferred_updates_next;
  }
--- 262,273 ----
  // For interaction with release(), these must follow the empty check,
  // and the order of these checks is important.
  bool OopStorage::Block::is_safe_to_delete() const {
    assert(is_empty(), "precondition");
    OrderAccess::loadload();
!   return (Atomic::load_acquire(&_release_refcount) == 0) &&
!          (Atomic::load_acquire(&_deferred_updates_next) == NULL);
  }

  OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
    return _deferred_updates_next;
  }
*** 512,522 ****
  void OopStorage::replace_active_array(ActiveArray* new_array) {
    // Caller has the old array that is the current value of _active_array.
    // Update new_array refcount to account for the new reference.
    new_array->increment_refcount();
    // Install new_array, ensuring its initialization is complete first.
!   OrderAccess::release_store(&_active_array, new_array);
    // Wait for any readers that could read the old array from _active_array.
    // Can't use GlobalCounter here, because this is called from allocate(),
    // which may be called in the scope of a GlobalCounter critical section
    // when inserting a StringTable entry.
    _protect_active.synchronize();
--- 512,522 ----
  void OopStorage::replace_active_array(ActiveArray* new_array) {
    // Caller has the old array that is the current value of _active_array.
    // Update new_array refcount to account for the new reference.
    new_array->increment_refcount();
    // Install new_array, ensuring its initialization is complete first.
!   Atomic::release_store(&_active_array, new_array);
    // Wait for any readers that could read the old array from _active_array.
    // Can't use GlobalCounter here, because this is called from allocate(),
    // which may be called in the scope of a GlobalCounter critical section
    // when inserting a StringTable entry.
    _protect_active.synchronize();
*** 530,540 ****
  // even if an allocate operation expands and replaces the value of
  // _active_array.  The caller must relinquish the array when done
  // using it.
  OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
    SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
!   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
    result->increment_refcount();
    return result;
  }

  // Decrement refcount of array and destroy if refcount is zero.
--- 530,540 ----
  // even if an allocate operation expands and replaces the value of
  // _active_array.  The caller must relinquish the array when done
  // using it.
  OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
    SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
!   ActiveArray* result = Atomic::load_acquire(&_active_array);
    result->increment_refcount();
    return result;
  }

  // Decrement refcount of array and destroy if refcount is zero.
*** 643,653 ****
  bool OopStorage::reduce_deferred_updates() {
    assert_lock_strong(_allocation_mutex);
    // Atomically pop a block off the list, if any available.
    // No ABA issue because this is only called by one thread at a time.
    // The atomicity is wrto pushes by release().
!   Block* block = OrderAccess::load_acquire(&_deferred_updates);
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
--- 643,653 ----
  bool OopStorage::reduce_deferred_updates() {
    assert_lock_strong(_allocation_mutex);
    // Atomically pop a block off the list, if any available.
    // No ABA issue because this is only called by one thread at a time.
    // The atomicity is wrto pushes by release().
!   Block* block = Atomic::load_acquire(&_deferred_updates);
    while (true) {
      if (block == NULL) return false;
      // Try atomic pop of block from list.
      Block* tail = block->deferred_updates_next();
      if (block == tail) tail = NULL; // Handle self-loop end marker.
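Note: the comments in this hunk describe a single-consumer pop from a list that is pushed to concurrently by release(); only the head load is shown above, now spelled Atomic::load_acquire. A standalone sketch in the same spirit, using std::atomic and a hypothetical Node type (the real list's self-loop end marker is replaced by a plain nullptr here, and ABA is avoided only because, as in the real code, a single thread pops):

  #include <atomic>

  struct Node {
    Node* _next;
  };

  std::atomic<Node*> g_head{nullptr};

  // Single-consumer pop: acquire-load the head so the node's fields are
  // visible, then compare-exchange it off the list; a failed exchange
  // refreshes 'node' with the current head and we retry.
  Node* pop_one() {
    Node* node = g_head.load(std::memory_order_acquire);
    while (node != nullptr) {
      Node* next = node->_next;
      if (g_head.compare_exchange_weak(node, next,
                                       std::memory_order_acquire,
                                       std::memory_order_acquire)) {
        break;
      }
    }
    return node;
  }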
*** 831,857 ****
  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
    // Set local flag first, else service thread could wake up and miss
    // the request.  This order may instead (rarely) unnecessarily notify.
!   OrderAccess::release_store(&_needs_cleanup, true);
!   OrderAccess::release_store_fence(&needs_cleanup_requested, true);
  }

  bool OopStorage::delete_empty_blocks() {
    // Service thread might have oopstorage work, but not for this object.
    // Check for deferred updates even though that's not a service thread
    // trigger; since we're here, we might as well process them.
!   if (!OrderAccess::load_acquire(&_needs_cleanup) &&
!       (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
      return false;
    }

    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

    // Clear the request before processing.
!   OrderAccess::release_store_fence(&_needs_cleanup, false);

    // Other threads could be adding to the empty block count or the
    // deferred update list while we're working.  Set an upper bound on
    // how many updates we'll process and blocks we'll try to release,
    // so other threads can't cause an unbounded stay in this function.
--- 831,857 ----
  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
    // Set local flag first, else service thread could wake up and miss
    // the request.  This order may instead (rarely) unnecessarily notify.
!   Atomic::release_store(&_needs_cleanup, true);
!   Atomic::release_store_fence(&needs_cleanup_requested, true);
  }

  bool OopStorage::delete_empty_blocks() {
    // Service thread might have oopstorage work, but not for this object.
    // Check for deferred updates even though that's not a service thread
    // trigger; since we're here, we might as well process them.
!   if (!Atomic::load_acquire(&_needs_cleanup) &&
!       (Atomic::load_acquire(&_deferred_updates) == NULL)) {
      return false;
    }

    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

    // Clear the request before processing.
!   Atomic::release_store_fence(&_needs_cleanup, false);

    // Other threads could be adding to the empty block count or the
    // deferred update list while we're working.  Set an upper bound on
    // how many updates we'll process and blocks we'll try to release,
    // so other threads can't cause an unbounded stay in this function.
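Note: the comments in this hunk spell out the flag protocol: the requester sets the per-storage flag before the global one so a worker woken by the global flag cannot miss the request, and the worker clears the per-storage flag before doing the work so a racing request is re-seen on the next pass rather than lost. A minimal standalone sketch of that protocol, using std::atomic (both flags, the Storage type, and the worker body are hypothetical stand-ins; store(release) plus a seq_cst fence is used here as a rough analogue of release_store_fence):

  #include <atomic>

  std::atomic<bool> g_any_cleanup_needed{false};   // polled by the worker thread

  struct Storage {
    std::atomic<bool> _needs_cleanup{false};

    void record_needs_cleanup() {
      // Local flag first, then the global wakeup flag; worst case is an
      // unnecessary wakeup, never a missed request.
      _needs_cleanup.store(true, std::memory_order_release);
      g_any_cleanup_needed.store(true, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    bool delete_empty_blocks() {
      if (!_needs_cleanup.load(std::memory_order_acquire)) {
        return false;                  // nothing requested for this storage
      }
      // Clear the request before scanning; a concurrent
      // record_needs_cleanup() will set it again and not be lost.
      _needs_cleanup.store(false, std::memory_order_seq_cst);
      // ... scan for and delete empty blocks ...
      return true;
    }
  };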
*** 991,1001 ****
    }
  }

  bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
    data->_processed += data->_segment_end - data->_segment_start;
!   size_t start = OrderAccess::load_acquire(&_next_block);
    if (start >= _block_count) {
      return finish_iteration(data); // No more blocks available.
    }
    // Try to claim several at a time, but not *too* many.  We want to
    // avoid deciding there are many available and selecting a large
--- 991,1001 ----
    }
  }

  bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
    data->_processed += data->_segment_end - data->_segment_start;
!   size_t start = Atomic::load_acquire(&_next_block);
    if (start >= _block_count) {
      return finish_iteration(data); // No more blocks available.
    }
    // Try to claim several at a time, but not *too* many.  We want to
    // avoid deciding there are many available and selecting a large
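Note: this hunk only shows the acquire load of the shared _next_block cursor and the comment about claiming a few blocks at a time; the claiming step itself is outside the hunk. As an illustration of the general technique only (not the exact heuristic or primitives used by BasicParState), here is a standalone sketch of bounded segment claiming over an atomic cursor, using std::atomic and a fixed chunk size chosen arbitrarily:

  #include <atomic>
  #include <algorithm>
  #include <cstddef>

  struct Claimer {
    std::atomic<size_t> _next_block{0};
    size_t _block_count;

    explicit Claimer(size_t count) : _block_count(count) {}

    // Claims a small range [start, end) of block indices, or returns false
    // when no blocks remain.  Keeping the chunk small leaves work for
    // late-arriving threads.
    bool claim_next_segment(size_t* start, size_t* end) {
      size_t cur = _next_block.load(std::memory_order_acquire);
      while (cur < _block_count) {
        size_t step = std::min<size_t>(8, _block_count - cur);
        if (_next_block.compare_exchange_weak(cur, cur + step,
                                              std::memory_order_acq_rel,
                                              std::memory_order_acquire)) {
          *start = cur;
          *end = cur + step;
          return true;
        }
        // 'cur' was refreshed by the failed compare-exchange; retry.
      }
      return false;  // no more blocks available
    }
  };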