
src/hotspot/share/gc/shared/oopStorage.cpp

v2

*** 725,735 ****
    _allocation_list(),
    _deferred_updates(NULL),
    _allocation_mutex(allocation_mutex),
    _active_mutex(active_mutex),
    _allocation_count(0),
!   _concurrent_iteration_active(0)
  {
    _active_array->increment_refcount();
    assert(_active_mutex->rank() < _allocation_mutex->rank(),
           "%s: active_mutex must have lower rank than allocation_mutex", _name);
    assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
--- 725,735 ----
    _allocation_list(),
    _deferred_updates(NULL),
    _allocation_mutex(allocation_mutex),
    _active_mutex(active_mutex),
    _allocation_count(0),
!   _concurrent_iteration_count(0)
  {
    _active_array->increment_refcount();
    assert(_active_mutex->rank() < _allocation_mutex->rank(),
           "%s: active_mutex must have lower rank than allocation_mutex", _name);
    assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
***************
*** 767,777 ****
    assert_at_safepoint();
    // Process any pending release updates, which may make more empty
    // blocks available for deletion.
    while (reduce_deferred_updates()) {}
    // Don't interfere with a concurrent iteration.
!   if (_concurrent_iteration_active > 0) return;
    // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
    for (Block* block = _allocation_list.tail();
         (block != NULL) && block->is_deletable();
         block = _allocation_list.tail()) {
      _active_array->remove(block);
--- 767,777 ----
    assert_at_safepoint();
    // Process any pending release updates, which may make more empty
    // blocks available for deletion.
    while (reduce_deferred_updates()) {}
    // Don't interfere with a concurrent iteration.
!   if (_concurrent_iteration_count > 0) return;
    // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
    for (Block* block = _allocation_list.tail();
         (block != NULL) && block->is_deletable();
         block = _allocation_list.tail()) {
      _active_array->remove(block);
***************
*** 802,812 ****
      }

      {
        MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
        // Don't interfere with a concurrent iteration.
!       if (_concurrent_iteration_active > 0) return;
        _active_array->remove(block);
      }
      // Remove block from _allocation_list and delete it.
      _allocation_list.unlink(*block);
      // Release mutex while deleting block.
--- 802,812 ----
      }

      {
        MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
        // Don't interfere with a concurrent iteration.
!       if (_concurrent_iteration_count > 0) return;
        _active_array->remove(block);
      }
      // Remove block from _allocation_list and delete it.
      _allocation_list.unlink(*block);
      // Release mutex while deleting block.
***************
*** 873,899 ****
    _next_block(0),
    _estimated_thread_count(estimated_thread_count),
    _concurrent(concurrent)
  {
    assert(estimated_thread_count > 0, "estimated thread count must be positive");
!   update_iteration_state(1);
    // Get the block count *after* iteration state updated, so concurrent
    // empty block deletion is suppressed and can't reduce the count.  But
    // ensure the count we use was written after the block with that count
    // was fully initialized; see ActiveArray::push.
    _block_count = _active_array->block_count_acquire();
  }

  OopStorage::BasicParState::~BasicParState() {
    _storage->relinquish_block_array(_active_array);
!   update_iteration_state(-1);
  }

! void OopStorage::BasicParState::update_iteration_state(int value) {
    if (_concurrent) {
      MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
!     _storage->_concurrent_iteration_active += value;
    }
  }

  bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
    data->_processed += data->_segment_end - data->_segment_start;
--- 873,900 ----
    _next_block(0),
    _estimated_thread_count(estimated_thread_count),
    _concurrent(concurrent)
  {
    assert(estimated_thread_count > 0, "estimated thread count must be positive");
!   update_concurrent_iteration_count(1);
    // Get the block count *after* iteration state updated, so concurrent
    // empty block deletion is suppressed and can't reduce the count.  But
    // ensure the count we use was written after the block with that count
    // was fully initialized; see ActiveArray::push.
    _block_count = _active_array->block_count_acquire();
  }

  OopStorage::BasicParState::~BasicParState() {
    _storage->relinquish_block_array(_active_array);
!   update_concurrent_iteration_count(-1);
  }

! void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
    if (_concurrent) {
      MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
!     _storage->_concurrent_iteration_count += value;
!     assert(_storage->_concurrent_iteration_count >= 0, "invariant");
    }
  }

  bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
    data->_processed += data->_segment_end - data->_segment_start;
***************
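For illustration only: the hunks above replace the boolean-style _concurrent_iteration_active with an explicit _concurrent_iteration_count, incremented when a concurrent BasicParState is constructed and decremented when it is destroyed, so overlapping concurrent iterations are tracked and empty-block deletion stays suppressed while any of them is active. Below is a minimal standalone sketch of that counting pattern, using std::mutex and hypothetical names (StorageModel, ConcurrentIterationScope, try_delete_empty_blocks) rather than HotSpot's types.

// Illustrative sketch only: models the counting scheme with standard C++;
// StorageModel, ConcurrentIterationScope and try_delete_empty_blocks are
// hypothetical names, not HotSpot's.
#include <cassert>
#include <mutex>

struct StorageModel {
  std::mutex active_mutex;             // plays the role of _active_mutex
  int concurrent_iteration_count = 0;  // plays the role of _concurrent_iteration_count

  // Analogous to BasicParState::update_concurrent_iteration_count(int).
  void update_concurrent_iteration_count(int value) {
    std::lock_guard<std::mutex> lock(active_mutex);
    concurrent_iteration_count += value;
    assert(concurrent_iteration_count >= 0 && "invariant");
  }

  // Analogous to the empty-block deletion paths: back off while any
  // concurrent iteration is in progress.
  bool try_delete_empty_blocks() {
    std::lock_guard<std::mutex> lock(active_mutex);
    if (concurrent_iteration_count > 0) return false;  // don't interfere
    // ... delete deletable blocks here ...
    return true;
  }
};

// RAII scope mirroring the BasicParState constructor/destructor pair.
struct ConcurrentIterationScope {
  StorageModel& storage;
  explicit ConcurrentIterationScope(StorageModel& s) : storage(s) {
    storage.update_concurrent_iteration_count(1);
  }
  ~ConcurrentIterationScope() {
    storage.update_concurrent_iteration_count(-1);
  }
};

int main() {
  StorageModel storage;
  {
    ConcurrentIterationScope scope(storage);     // concurrent iteration active
    assert(!storage.try_delete_empty_blocks());  // deletion is suppressed
  }
  assert(storage.try_delete_empty_blocks());     // and allowed again afterwards
  return 0;
}

The RAII scope keeps the count balanced on every exit path, which is the same property the BasicParState constructor/destructor pair provides for the real counter.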
*** 951,961 ****
    double data_size = section_size * section_count;
    double alloc_percentage = percent_of((double)allocations, blocks * data_size);

    st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
              name(), allocations, blocks, alloc_percentage, total_memory_usage());
!   if (_concurrent_iteration_active > 0) {
      st->print(", concurrent iteration active");
    }
  }

  #endif // !PRODUCT
--- 952,962 ----
    double data_size = section_size * section_count;
    double alloc_percentage = percent_of((double)allocations, blocks * data_size);

    st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
              name(), allocations, blocks, alloc_percentage, total_memory_usage());
!   if (_concurrent_iteration_count > 0) {
      st->print(", concurrent iteration active");
    }
  }

  #endif // !PRODUCT