< prev index next >

src/hotspot/share/gc/shared/oopStorage.cpp

8211718: Supporting multiple concurrent OopStorage iterators
   _allocation_list(),
   _deferred_updates(NULL),
   _allocation_mutex(allocation_mutex),
   _active_mutex(active_mutex),
   _allocation_count(0),
-  _concurrent_iteration_active(false)
+  _concurrent_iteration_active(0)
 {
   _active_array->increment_refcount();
   assert(_active_mutex->rank() < _allocation_mutex->rank(),
          "%s: active_mutex must have lower rank than allocation_mutex", _name);
   assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,

@@ -767,11 +767,11 @@
   assert_at_safepoint();
   // Process any pending release updates, which may make more empty
   // blocks available for deletion.
   while (reduce_deferred_updates()) {}
   // Don't interfere with a concurrent iteration.
-  if (_concurrent_iteration_active) return;
+  if (_concurrent_iteration_active > 0) return;
   // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
   for (Block* block = _allocation_list.tail();
        (block != NULL) && block->is_deletable();
        block = _allocation_list.tail()) {
    _active_array->remove(block);
@@ -802,11 +802,11 @@
   }
   {
     MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
     // Don't interfere with a concurrent iteration.
-    if (_concurrent_iteration_active) return;
+    if (_concurrent_iteration_active > 0) return;
     _active_array->remove(block);
   }
   // Remove block from _allocation_list and delete it.
   _allocation_list.unlink(*block);
   // Release mutex while deleting block.
@@ -873,28 +873,27 @@
   _next_block(0),
   _estimated_thread_count(estimated_thread_count),
   _concurrent(concurrent)
 {
   assert(estimated_thread_count > 0, "estimated thread count must be positive");
-  update_iteration_state(true);
+  update_iteration_state(1);
   // Get the block count *after* iteration state updated, so concurrent
   // empty block deletion is suppressed and can't reduce the count.  But
   // ensure the count we use was written after the block with that count
   // was fully initialized; see ActiveArray::push.
   _block_count = _active_array->block_count_acquire();
 }

 OopStorage::BasicParState::~BasicParState() {
   _storage->relinquish_block_array(_active_array);
-  update_iteration_state(false);
+  update_iteration_state(-1);
 }

-void OopStorage::BasicParState::update_iteration_state(bool value) {
+void OopStorage::BasicParState::update_iteration_state(int value) {
   if (_concurrent) {
     MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
-    assert(_storage->_concurrent_iteration_active != value, "precondition");
-    _storage->_concurrent_iteration_active = value;
+    _storage->_concurrent_iteration_active += value;
   }
 }

 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
   data->_processed += data->_segment_end - data->_segment_start;
@@ -952,11 +951,11 @@
   double data_size = section_size * section_count;
   double alloc_percentage = percent_of((double)allocations, blocks * data_size);
   st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), "
             SIZE_FORMAT " bytes",
             name(), allocations, blocks, alloc_percentage, total_memory_usage());
-  if (_concurrent_iteration_active) {
+  if (_concurrent_iteration_active > 0) {
     st->print(", concurrent iteration active");
   }
 }
 #endif // !PRODUCT
< prev index next >